"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""Salesforce/blip-vqa-base""": """https://huggingface.co/Salesforce/blip-vqa-base/resolve/main/config.json""",
"""Salesforce/blip-vqa-capfit-large""": (
"""https://huggingface.co/Salesforce/blip-vqa-base-capfit/resolve/main/config.json"""
),
"""Salesforce/blip-image-captioning-base""": (
"""https://huggingface.co/Salesforce/blip-image-captioning-base/resolve/main/config.json"""
),
"""Salesforce/blip-image-captioning-large""": (
"""https://huggingface.co/Salesforce/blip-image-captioning-large/resolve/main/config.json"""
),
"""Salesforce/blip-itm-base-coco""": """https://huggingface.co/Salesforce/blip-itm-base-coco/resolve/main/config.json""",
"""Salesforce/blip-itm-large-coco""": """https://huggingface.co/Salesforce/blip-itm-large-coco/resolve/main/config.json""",
"""Salesforce/blip-itm-base-flikr""": """https://huggingface.co/Salesforce/blip-itm-base-flikr/resolve/main/config.json""",
"""Salesforce/blip-itm-large-flikr""": (
"""https://huggingface.co/Salesforce/blip-itm-large-flikr/resolve/main/config.json"""
),
}
class BlipTextConfig(PretrainedConfig):
    model_type = "blip_text_model"

    def __init__(
        self,
        vocab_size=30524,
        hidden_size=768,
        encoder_hidden_size=768,
        intermediate_size=3072,
        projection_dim=768,
        num_hidden_layers=12,
        num_attention_heads=8,
        max_position_embeddings=512,
        hidden_act="gelu",
        layer_norm_eps=1e-12,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        bos_token_id=30522,
        eos_token_id=2,
        pad_token_id=0,
        sep_token_id=102,
        is_decoder=True,
        use_cache=True,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id,
            sep_token_id=sep_token_id, **kwargs,
        )

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.encoder_hidden_size = encoder_hidden_size
        self.intermediate_size = intermediate_size
        self.projection_dim = projection_dim
        self.hidden_dropout_prob = hidden_dropout_prob
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.is_decoder = is_decoder
        self.use_cache = use_cache

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the text config dict if we are loading from BlipConfig
        if config_dict.get("model_type") == "blip":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class BlipVisionConfig(PretrainedConfig):
    model_type = "blip_vision_model"

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3072,
        projection_dim=512,
        num_hidden_layers=12,
        num_attention_heads=12,
        image_size=384,
        patch_size=16,
        hidden_act="gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=1e-10,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from BlipConfig
        if config_dict.get("model_type") == "blip":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class BlipConfig(PretrainedConfig):
    model_type = "blip"
    is_composition = True

    def __init__(
        self,
        text_config=None,
        vision_config=None,
        projection_dim=512,
        logit_scale_init_value=2.6592,
        image_text_hidden_size=256,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if text_config is None:
            text_config = {}
            logger.info("`text_config` is `None`. Initializing the `BlipTextConfig` with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("`vision_config` is `None`. Initializing the `BlipVisionConfig` with default values.")

        self.text_config = BlipTextConfig(**text_config)
        self.vision_config = BlipVisionConfig(**vision_config)

        self.text_config.encoder_hidden_size = self.vision_config.hidden_size

        self.projection_dim = projection_dim
        self.logit_scale_init_value = logit_scale_init_value
        self.initializer_factor = 1.0
        self.initializer_range = 0.02
        self.image_text_hidden_size = image_text_hidden_size

    @classmethod
    def from_text_vision_configs(cls, text_config: BlipTextConfig, vision_config: BlipVisionConfig, **kwargs):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
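# Usage sketch (illustrative, assuming the three classes above are importable):
# a composite BlipConfig can be assembled from the two sub-configs and
# round-tripped through to_dict().
#
#   text_config = BlipTextConfig(vocab_size=30524, hidden_size=768)
#   vision_config = BlipVisionConfig(image_size=384, patch_size=16)
#   config = BlipConfig.from_text_vision_configs(text_config, vision_config)
#   assert config.to_dict()["model_type"] == "blip"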
"""Initialize a FlaxVisionEncoderDecoderModel from a pretrained encoder and a pretrained decoder."""
from dataclasses import dataclass, field
from typing import Optional

from transformers import AutoConfig, AutoImageProcessor, AutoTokenizer, FlaxVisionEncoderDecoderModel, HfArgumentParser


@dataclass
class ModelArguments:
    output_dir: str = field(
        metadata={"help": "The output directory where the model will be written."},
    )
    encoder_model_name_or_path: str = field(
        metadata={
            "help": (
                "The encoder model checkpoint for weights initialization. "
                "Don't set if you want to train an encoder model from scratch."
            )
        },
    )
    decoder_model_name_or_path: str = field(
        metadata={
            "help": (
                "The decoder model checkpoint for weights initialization. "
                "Don't set if you want to train a decoder model from scratch."
            )
        },
    )
    encoder_config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained encoder config name or path if not the same as encoder_model_name"}
    )
    decoder_config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained decoder config name or path if not the same as decoder_model_name"}
    )


def main():
    parser = HfArgumentParser((ModelArguments,))
    (model_args,) = parser.parse_args_into_dataclasses()

    # Load pretrained model and tokenizer

    # Use explicit specified encoder config
    if model_args.encoder_config_name:
        encoder_config = AutoConfig.from_pretrained(model_args.encoder_config_name)
    # Use pretrained encoder model's config
    else:
        encoder_config = AutoConfig.from_pretrained(model_args.encoder_model_name_or_path)

    # Use explicit specified decoder config
    if model_args.decoder_config_name:
        decoder_config = AutoConfig.from_pretrained(model_args.decoder_config_name)
    # Use pretrained decoder model's config
    else:
        decoder_config = AutoConfig.from_pretrained(model_args.decoder_model_name_or_path)

    # necessary for `from_encoder_decoder_pretrained` when `decoder_config` is passed
    decoder_config.is_decoder = True
    decoder_config.add_cross_attention = True

    model = FlaxVisionEncoderDecoderModel.from_encoder_decoder_pretrained(
        encoder_pretrained_model_name_or_path=model_args.encoder_model_name_or_path,
        decoder_pretrained_model_name_or_path=model_args.decoder_model_name_or_path,
        encoder_config=encoder_config,
        decoder_config=decoder_config,
    )

    # GPT2 only has bos/eos tokens but not decoder_start/pad tokens
    decoder_start_token_id = decoder_config.decoder_start_token_id
    pad_token_id = decoder_config.pad_token_id
    if decoder_start_token_id is None:
        decoder_start_token_id = decoder_config.bos_token_id
    if pad_token_id is None:
        pad_token_id = decoder_config.eos_token_id

    # This is necessary to make Flax's generate() work
    model.config.eos_token_id = decoder_config.eos_token_id
    model.config.decoder_start_token_id = decoder_start_token_id
    model.config.pad_token_id = pad_token_id

    image_processor = AutoImageProcessor.from_pretrained(model_args.encoder_model_name_or_path)

    tokenizer = AutoTokenizer.from_pretrained(model_args.decoder_model_name_or_path)
    tokenizer.pad_token = tokenizer.convert_ids_to_tokens(model.config.pad_token_id)

    model.save_pretrained(model_args.output_dir)
    image_processor.save_pretrained(model_args.output_dir)
    tokenizer.save_pretrained(model_args.output_dir)


if __name__ == "__main__":
    main()
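# Usage sketch (illustrative): the script is run from the command line; flag
# names follow the ModelArguments dataclass above, and the checkpoint
# identifiers below are examples only:
#
#   python create_model.py \
#       --output_dir ./vit-gpt2 \
#       --encoder_model_name_or_path google/vit-base-patch16-224-in21k \
#       --decoder_model_name_or_path gpt2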
"""simple docstring"""
_UpperCamelCase = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
_UpperCamelCase = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
_UpperCamelCase = {
0: """Sunday""",
1: """Monday""",
2: """Tuesday""",
3: """Wednesday""",
4: """Thursday""",
5: """Friday""",
6: """Saturday""",
}
def _a ( _snake_case , _snake_case , _snake_case ):
"""simple docstring"""
assert len(str(_snake_case ) ) > 2, "year should be in YYYY format"
assert 1 <= month <= 12, "month should be between 1 to 12"
assert 1 <= day <= 31, "day should be between 1 to 31"
# Doomsday algorithm:
UpperCAmelCase = year // 100
UpperCAmelCase = (5 * (century % 4) + 2) % 7
UpperCAmelCase = year % 100
UpperCAmelCase = centurian % 12
UpperCAmelCase = (
(centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor
) % 7
UpperCAmelCase = (
DOOMSDAY_NOT_LEAP[month - 1]
if (year % 4 != 0) or (centurian == 0 and (year % 400) == 0)
else DOOMSDAY_LEAP[month - 1]
)
UpperCAmelCase = (dooms_day + day - day_anchor) % 7
return WEEK_DAY_NAMES[week_day]
if __name__ == "__main__":
import doctest
doctest.testmod()
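# Usage sketch (illustrative): get_week_day() maps a Gregorian date to a
# weekday name, e.g.:
#
#   >>> get_week_day(2022, 1, 1)
#   'Saturday'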
"""simple docstring"""
from __future__ import annotations
import json
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
_UpperCamelCase = {"""UserAgent""": UserAgent().random}
def _a ( _snake_case ):
"""simple docstring"""
UpperCAmelCase = script.contents[0]
UpperCAmelCase = json.loads(data[data.find("""{\"config\"""" ) : -1] )
return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]
class lowerCamelCase__ :
def __init__( self ,A ):
UpperCAmelCase = F'''https://www.instagram.com/{username}/'''
UpperCAmelCase = self.get_json()
def _UpperCamelCase ( self ):
UpperCAmelCase = requests.get(self.url ,headers=A ).text
UpperCAmelCase = BeautifulSoup(A ,"""html.parser""" ).find_all("""script""" )
try:
return extract_user_profile(scripts[4] )
except (json.decoder.JSONDecodeError, KeyError):
return extract_user_profile(scripts[3] )
def __repr__( self ):
return F'''{self.__class__.__name__}(\'{self.username}\')'''
def __str__( self ):
return F'''{self.fullname} ({self.username}) is {self.biography}'''
@property
def _UpperCamelCase ( self ):
return self.user_data["username"]
@property
def _UpperCamelCase ( self ):
return self.user_data["full_name"]
@property
def _UpperCamelCase ( self ):
return self.user_data["biography"]
@property
def _UpperCamelCase ( self ):
return self.user_data["business_email"]
@property
def _UpperCamelCase ( self ):
return self.user_data["external_url"]
@property
def _UpperCamelCase ( self ):
return self.user_data["edge_followed_by"]["count"]
@property
def _UpperCamelCase ( self ):
return self.user_data["edge_follow"]["count"]
@property
def _UpperCamelCase ( self ):
return self.user_data["edge_owner_to_timeline_media"]["count"]
@property
def _UpperCamelCase ( self ):
return self.user_data["profile_pic_url_hd"]
@property
def _UpperCamelCase ( self ):
return self.user_data["is_verified"]
@property
def _UpperCamelCase ( self ):
return self.user_data["is_private"]
def _a ( _snake_case = "github" ):
"""simple docstring"""
import os
if os.environ.get("""CI""" ):
return # test failing on GitHub Actions
UpperCAmelCase = InstagramUser(_snake_case )
assert instagram_user.user_data
assert isinstance(instagram_user.user_data , _snake_case )
assert instagram_user.username == username
if username != "github":
return
assert instagram_user.fullname == "GitHub"
assert instagram_user.biography == "Built for developers."
assert instagram_user.number_of_posts > 150
assert instagram_user.number_of_followers > 12_0000
assert instagram_user.number_of_followings > 15
assert instagram_user.email == "[email protected]"
assert instagram_user.website == "https://github.com/readme"
assert instagram_user.profile_picture_url.startswith("""https://instagram.""" )
assert instagram_user.is_verified is True
assert instagram_user.is_private is False
if __name__ == "__main__":
import doctest
doctest.testmod()
_UpperCamelCase = InstagramUser("""github""")
print(instagram_user)
print(F"""{instagram_user.number_of_posts = }""")
print(F"""{instagram_user.number_of_followers = }""")
print(F"""{instagram_user.number_of_followings = }""")
print(F"""{instagram_user.email = }""")
print(F"""{instagram_user.website = }""")
print(F"""{instagram_user.profile_picture_url = }""")
print(F"""{instagram_user.is_verified = }""")
print(F"""{instagram_user.is_private = }""")
"""Speech processor class for SpeechT5."""
from ...processing_utils import ProcessorMixin


class SpeechT5Processor(ProcessorMixin):
    feature_extractor_class = "SpeechT5FeatureExtractor"
    tokenizer_class = "SpeechT5Tokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)

    def __call__(self, *args, **kwargs):
        """Processes audio and text inputs, as well as audio and text targets."""
        audio = kwargs.pop("audio", None)
        text = kwargs.pop("text", None)
        text_target = kwargs.pop("text_target", None)
        audio_target = kwargs.pop("audio_target", None)
        sampling_rate = kwargs.pop("sampling_rate", None)

        if audio is not None and text is not None:
            raise ValueError(
                "Cannot process both `audio` and `text` inputs. Did you mean `audio_target` or `text_target`?"
            )
        if audio_target is not None and text_target is not None:
            raise ValueError(
                "Cannot process both `audio_target` and `text_target` inputs. Did you mean `audio` or `text`?"
            )
        if audio is None and audio_target is None and text is None and text_target is None:
            raise ValueError(
                "You need to specify either an `audio`, `audio_target`, `text`, or `text_target` input to process."
            )

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        elif text is not None:
            inputs = self.tokenizer(text, **kwargs)
        else:
            inputs = None

        if audio_target is not None:
            targets = self.feature_extractor(audio_target=audio_target, *args, sampling_rate=sampling_rate, **kwargs)
            labels = targets["input_values"]
        elif text_target is not None:
            targets = self.tokenizer(text_target, **kwargs)
            labels = targets["input_ids"]
        else:
            targets = None

        if inputs is None:
            return targets

        if targets is not None:
            inputs["labels"] = labels

            decoder_attention_mask = targets.get("attention_mask")
            if decoder_attention_mask is not None:
                inputs["decoder_attention_mask"] = decoder_attention_mask

        return inputs

    def pad(self, *args, **kwargs):
        """Collates audio and text inputs, as well as their targets, into a padded batch."""
        input_values = kwargs.pop("input_values", None)
        input_ids = kwargs.pop("input_ids", None)
        labels = kwargs.pop("labels", None)

        if input_values is not None and input_ids is not None:
            raise ValueError("Cannot process both `input_values` and `input_ids` inputs.")
        if input_values is None and input_ids is None and labels is None:
            raise ValueError(
                "You need to specify either an `input_values`, `input_ids`, or `labels` input to be padded."
            )

        if input_values is not None:
            inputs = self.feature_extractor.pad(input_values, *args, **kwargs)
        elif input_ids is not None:
            inputs = self.tokenizer.pad(input_ids, **kwargs)
        else:
            inputs = None

        if labels is not None:
            if "input_ids" in labels or (isinstance(labels, list) and "input_ids" in labels[0]):
                targets = self.tokenizer.pad(labels, **kwargs)
                labels = targets["input_ids"]
            else:
                # temporarily pad log-mel targets with the feature extractor by
                # pretending its feature size equals the number of mel bins
                feature_size_hack = self.feature_extractor.feature_size
                self.feature_extractor.feature_size = self.feature_extractor.num_mel_bins
                targets = self.feature_extractor.pad(labels, *args, **kwargs)
                self.feature_extractor.feature_size = feature_size_hack
                labels = targets["input_values"]
        else:
            targets = None

        if inputs is None:
            return targets

        if targets is not None:
            inputs["labels"] = labels

            decoder_attention_mask = targets.get("attention_mask")
            if decoder_attention_mask is not None:
                inputs["decoder_attention_mask"] = decoder_attention_mask

        return inputs

    def batch_decode(self, *args, **kwargs):
        """Forwards all arguments to SpeechT5Tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forwards all arguments to SpeechT5Tokenizer's decode."""
        return self.tokenizer.decode(*args, **kwargs)
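# Usage sketch (illustrative; the checkpoint name is an example): given a
# pretrained processor, __call__ routes `text` to the tokenizer and `audio` to
# the feature extractor, as implemented above.
#
#   from transformers import SpeechT5Processor
#   processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_tts")
#   inputs = processor(text="Hello, world.", return_tensors="pt")  # -> input_ids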
import argparse
import logging
import os
from pathlib import Path
from typing import Any, Dict
import pytorch_lightning as pl
from pytorch_lightning.utilities import rank_zero_info
from transformers import (
AdamW,
AutoConfig,
AutoModel,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
    AutoModelForSeq2SeqLM,
AutoModelForSequenceClassification,
AutoModelForTokenClassification,
AutoModelWithLMHead,
AutoTokenizer,
PretrainedConfig,
PreTrainedTokenizer,
)
from transformers.optimization import (
Adafactor,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)

require_version("pytorch_lightning>=1.0.4")

MODEL_MODES = {
    "base": AutoModel,
    "sequence-classification": AutoModelForSequenceClassification,
    "question-answering": AutoModelForQuestionAnswering,
    "pretraining": AutoModelForPreTraining,
    "token-classification": AutoModelForTokenClassification,
    "language-modeling": AutoModelWithLMHead,
    "summarization": AutoModelForSeq2SeqLM,
    "translation": AutoModelForSeq2SeqLM,
}


# update this and the import above to support new schedulers from transformers.optimization
arg_to_scheduler = {
    "linear": get_linear_schedule_with_warmup,
    "cosine": get_cosine_schedule_with_warmup,
    "cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup,
    "polynomial": get_polynomial_decay_schedule_with_warmup,
    # '': get_constant_schedule,             # not supported for now
    # '': get_constant_schedule_with_warmup, # not supported for now
}
arg_to_scheduler_choices = sorted(arg_to_scheduler.keys())
arg_to_scheduler_metavar = "{" + ", ".join(arg_to_scheduler_choices) + "}"
class BaseTransformer(pl.LightningModule):
    def __init__(
        self,
        hparams: argparse.Namespace,
        num_labels=None,
        mode="base",
        config=None,
        tokenizer=None,
        model=None,
        **config_kwargs,
    ):
        """Initialize a model, tokenizer and config."""
        super().__init__()
        # TODO: move to self.save_hyperparameters()
        # self.save_hyperparameters()
        # can also expand arguments into trainer signature for easier reading

        self.save_hyperparameters(hparams)
        self.step_count = 0
        self.output_dir = Path(self.hparams.output_dir)
        cache_dir = self.hparams.cache_dir if self.hparams.cache_dir else None
        if config is None:
            self.config = AutoConfig.from_pretrained(
                self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path,
                **({"num_labels": num_labels} if num_labels is not None else {}),
                cache_dir=cache_dir,
                **config_kwargs,
            )
        else:
            self.config: PretrainedConfig = config

        extra_model_params = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout")
        for p in extra_model_params:
            if getattr(self.hparams, p, None):
                assert hasattr(self.config, p), f"model config doesn't have a `{p}` attribute"
                setattr(self.config, p, getattr(self.hparams, p))

        if tokenizer is None:
            self.tokenizer = AutoTokenizer.from_pretrained(
                self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path,
                cache_dir=cache_dir,
            )
        else:
            self.tokenizer: PreTrainedTokenizer = tokenizer
        self.model_type = MODEL_MODES[mode]
        if model is None:
            self.model = self.model_type.from_pretrained(
                self.hparams.model_name_or_path,
                from_tf=bool(".ckpt" in self.hparams.model_name_or_path),
                config=self.config,
                cache_dir=cache_dir,
            )
        else:
            self.model = model

    def load_hf_checkpoint(self, *args, **kwargs):
        self.model = self.model_type.from_pretrained(*args, **kwargs)

    def get_lr_scheduler(self):
        get_schedule_func = arg_to_scheduler[self.hparams.lr_scheduler]
        scheduler = get_schedule_func(
            self.opt, num_warmup_steps=self.hparams.warmup_steps, num_training_steps=self.total_steps()
        )
        scheduler = {"scheduler": scheduler, "interval": "step", "frequency": 1}
        return scheduler

    def configure_optimizers(self):
        """Prepare optimizer and schedule (linear warmup and decay)"""
        model = self.model
        no_decay = ["bias", "LayerNorm.weight"]
        optimizer_grouped_parameters = [
            {
                "params": [
                    p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)
                ],  # check this named paramters
                "weight_decay": self.hparams.weight_decay,
            },
            {
                "params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
                "weight_decay": 0.0,
            },
        ]
        if self.hparams.adafactor:
            optimizer = Adafactor(
                optimizer_grouped_parameters, lr=self.hparams.learning_rate, scale_parameter=False, relative_step=False
            )
        else:
            optimizer = AdamW(
                optimizer_grouped_parameters, lr=self.hparams.learning_rate, eps=self.hparams.adam_epsilon
            )
        self.opt = optimizer

        scheduler = self.get_lr_scheduler()

        return [optimizer], [scheduler]

    def test_step(self, batch, batch_nb):
        return self.validation_step(batch, batch_nb)

    def test_epoch_end(self, outputs):
        return self.validation_end(outputs)

    def total_steps(self) -> int:
        """The number of total training steps that will be run. Used for lr scheduler purposes."""
        num_devices = max(1, self.hparams.gpus)  # TODO: consider num_tpu_cores
        effective_batch_size = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices
        return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs

    def setup(self, stage):
        if stage == "test":
            self.dataset_size = len(self.test_dataloader().dataset)
        else:
            self.train_loader = self.get_dataloader("train", self.hparams.train_batch_size, shuffle=True)
            self.dataset_size = len(self.train_dataloader().dataset)

    def get_dataloader(self, type_path: str, batch_size: int, shuffle: bool = False):
        raise NotImplementedError("You must implement this for your task")

    def train_dataloader(self):
        return self.train_loader

    def val_dataloader(self):
        return self.get_dataloader("dev", self.hparams.eval_batch_size, shuffle=False)

    def test_dataloader(self):
        return self.get_dataloader("test", self.hparams.eval_batch_size, shuffle=False)

    def _feature_file(self, mode):
        return os.path.join(
            self.hparams.data_dir,
            "cached_{}_{}_{}".format(
                mode,
                list(filter(None, self.hparams.model_name_or_path.split("/"))).pop(),
                str(self.hparams.max_seq_length),
            ),
        )

    @pl.utilities.rank_zero_only
    def on_save_checkpoint(self, checkpoint: Dict[str, Any]) -> None:
        save_path = self.output_dir.joinpath("best_tfmr")
        self.model.config.save_step = self.step_count
        self.model.save_pretrained(save_path)
        self.tokenizer.save_pretrained(save_path)

    @staticmethod
    def add_model_specific_args(parser, root_dir):
        parser.add_argument(
            "--model_name_or_path",
            default=None,
            type=str,
            required=True,
            help="Path to pretrained model or model identifier from huggingface.co/models",
        )
        parser.add_argument(
            "--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name"
        )
        parser.add_argument(
            "--tokenizer_name",
            default=None,
            type=str,
            help="Pretrained tokenizer name or path if not the same as model_name",
        )
        parser.add_argument(
            "--cache_dir",
            default=str(Path(__file__).parent / "test_run" / "cache"),
            type=str,
            help="Where do you want to store the pre-trained models downloaded from huggingface.co",
        )
        parser.add_argument(
            "--encoder_layerdrop",
            type=float,
            help="Encoder layer dropout probability (Optional). Goes into model.config",
        )
        parser.add_argument(
            "--decoder_layerdrop",
            type=float,
            help="Decoder layer dropout probability (Optional). Goes into model.config",
        )
        parser.add_argument(
            "--dropout",
            type=float,
            help="Dropout probability (Optional). Goes into model.config",
        )
        parser.add_argument(
            "--attention_dropout",
            type=float,
            help="Attention dropout probability (Optional). Goes into model.config",
        )
        parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.")
        parser.add_argument(
            "--lr_scheduler",
            default="linear",
            choices=arg_to_scheduler_choices,
            metavar=arg_to_scheduler_metavar,
            type=str,
            help="Learning rate scheduler",
        )
        parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.")
        parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.")
        parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.")
        parser.add_argument("--num_workers", default=4, type=int, help="kwarg passed to DataLoader")
        parser.add_argument("--num_train_epochs", dest="max_epochs", default=3, type=int)
        parser.add_argument("--train_batch_size", default=32, type=int)
        parser.add_argument("--eval_batch_size", default=32, type=int)
        parser.add_argument("--adafactor", action="store_true")
class InitCallback(pl.Callback):
    def on_sanity_check_start(self, trainer, pl_module):
        if (
            trainer.is_global_zero and trainer.global_rank == 0
        ):  # we initialize the retriever only on master worker with RAY. In new pytorch-lightning accelorators are removed.
            pl_module.model.rag.retriever.init_retrieval()  # better to use hook functions.


class CheckParamCallback(pl.Callback):
    # check whether new added model parameters are differentiable
    def on_after_backward(self, trainer, pl_module):
        for name, param in pl_module.model.rag.named_parameters():
            if param.grad is None:
                print(name)


class LoggingCallback(pl.Callback):
    def on_batch_end(self, trainer, pl_module):
        lr_scheduler = trainer.lr_schedulers[0]["scheduler"]
        lrs = {f"lr_group_{i}": lr for i, lr in enumerate(lr_scheduler.get_lr())}
        pl_module.logger.log_metrics(lrs)

    def on_validation_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        rank_zero_info("***** Validation results *****")
        metrics = trainer.callback_metrics
        # Log results
        for key in sorted(metrics):
            if key not in ["log", "progress_bar"]:
                rank_zero_info("{} = {}\n".format(key, str(metrics[key])))

    def on_test_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        rank_zero_info("***** Test results *****")
        metrics = trainer.callback_metrics
        # Log and save results to file
        output_test_results_file = os.path.join(pl_module.hparams.output_dir, "test_results.txt")
        with open(output_test_results_file, "w") as writer:
            for key in sorted(metrics):
                if key not in ["log", "progress_bar"]:
                    rank_zero_info("{} = {}\n".format(key, str(metrics[key])))
                    writer.write("{} = {}\n".format(key, str(metrics[key])))
def add_generic_args(parser, root_dir) -> None:
    #  To allow all pl args uncomment the following line
    #  parser = pl.Trainer.add_argparse_args(parser)
    parser.add_argument(
        "--output_dir",
        default=str(Path(__file__).parent / "test_run" / "model_checkpoints"),
        type=str,
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    parser.add_argument(
        "--fp16",
        action="store_true",
        help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit",
    )
    parser.add_argument(
        "--fp16_opt_level",
        type=str,
        default="O2",
        help=(
            "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
            "See details at https://nvidia.github.io/apex/amp.html"
        ),
    )
    parser.add_argument("--n_tpu_cores", dest="tpu_cores", type=int)
    parser.add_argument("--max_grad_norm", dest="gradient_clip_val", default=1.0, type=float, help="Max gradient norm")
    parser.add_argument("--do_train", action="store_true", help="Whether to run training.")
    parser.add_argument("--do_predict", action="store_true", help="Whether to run predictions on the test set.")
    parser.add_argument(
        "--gradient_accumulation_steps",
        dest="accumulate_grad_batches",
        type=int,
        default=1,
        help="Number of updates steps to accumulate before performing a backward/update pass.",
    )
    parser.add_argument("--seed", type=int, default=42, help="random seed for initialization")
    parser.add_argument(
        "--data_dir",
        default=str(Path(__file__).parent / "test_run" / "dummy-train-data"),
        type=str,
        help="The input data dir. Should contain the training files for the CoNLL-2003 NER task.",
    )


def generic_train(
    model: BaseTransformer,
    args: argparse.Namespace,
    early_stopping_callback=None,
    logger=True,  # can pass WandbLogger() here
    extra_callbacks=[],
    checkpoint_callback=None,
    logging_callback=None,
    **extra_train_kwargs,
):
    pl.seed_everything(args.seed)

    # init model
    odir = Path(model.hparams.output_dir)
    odir.mkdir(exist_ok=True)

    # add custom checkpoints
    if checkpoint_callback is None:
        checkpoint_callback = pl.callbacks.ModelCheckpoint(
            filepath=args.output_dir, prefix="checkpoint", monitor="val_loss", mode="min", save_top_k=1
        )
    if early_stopping_callback:
        extra_callbacks.append(early_stopping_callback)
    if logging_callback is None:
        logging_callback = LoggingCallback()

    train_params = {}

    if args.fp16:
        train_params["precision"] = 16

    if args.gpus > 1:
        train_params["accelerator"] = "auto"
        train_params["strategy"] = "ddp"

    train_params["accumulate_grad_batches"] = args.accumulate_grad_batches
    train_params["profiler"] = None
    train_params["devices"] = "auto"

    trainer = pl.Trainer.from_argparse_args(
        args,
        weights_summary=None,
        callbacks=[logging_callback] + extra_callbacks + [InitCallback()] + [checkpoint_callback],
        logger=logger,
        val_check_interval=1,
        num_sanity_val_steps=2,
        **train_params,
    )

    if args.do_train:
        trainer.fit(model)

    else:
        print("RAG modeling tests with new set functions successfully executed!")
    return trainer
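# Usage sketch (illustrative): BaseTransformer.get_dataloader() is abstract, so
# a task module is expected to subclass it roughly as below (the dataset helper
# name is hypothetical):
#
#   class MyTaskModule(BaseTransformer):
#       def __init__(self, hparams):
#           super().__init__(hparams, num_labels=2, mode="sequence-classification")
#
#       def get_dataloader(self, type_path, batch_size, shuffle=False):
#           dataset = build_my_dataset(type_path)  # hypothetical helper
#           return DataLoader(dataset, batch_size=batch_size, shuffle=shuffle)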
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_roformer": ["ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoFormerConfig", "RoFormerOnnxConfig"],
    "tokenization_roformer": ["RoFormerTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase = ["""RoFormerTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roformer"] = [
"""ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""RoFormerForCausalLM""",
"""RoFormerForMaskedLM""",
"""RoFormerForMultipleChoice""",
"""RoFormerForQuestionAnswering""",
"""RoFormerForSequenceClassification""",
"""RoFormerForTokenClassification""",
"""RoFormerLayer""",
"""RoFormerModel""",
"""RoFormerPreTrainedModel""",
"""load_tf_weights_in_roformer""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_roformer"] = [
"""TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFRoFormerForCausalLM""",
"""TFRoFormerForMaskedLM""",
"""TFRoFormerForMultipleChoice""",
"""TFRoFormerForQuestionAnswering""",
"""TFRoFormerForSequenceClassification""",
"""TFRoFormerForTokenClassification""",
"""TFRoFormerLayer""",
"""TFRoFormerModel""",
"""TFRoFormerPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_roformer"] = [
"""FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FlaxRoFormerForMaskedLM""",
"""FlaxRoFormerForMultipleChoice""",
"""FlaxRoFormerForQuestionAnswering""",
"""FlaxRoFormerForSequenceClassification""",
"""FlaxRoFormerForTokenClassification""",
"""FlaxRoFormerModel""",
"""FlaxRoFormerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
from .tokenization_roformer import RoFormerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roformer_fast import RoFormerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roformer import (
ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
RoFormerForCausalLM,
RoFormerForMaskedLM,
RoFormerForMultipleChoice,
RoFormerForQuestionAnswering,
RoFormerForSequenceClassification,
RoFormerForTokenClassification,
RoFormerLayer,
RoFormerModel,
RoFormerPreTrainedModel,
load_tf_weights_in_roformer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roformer import (
TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerLayer,
TFRoFormerModel,
TFRoFormerPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roformer import (
FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
FlaxRoFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
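# Usage sketch (illustrative): with the lazy structure above, heavy submodules
# are imported only on first attribute access, so the usual import style keeps
# working:
#
#   from transformers import RoFormerConfig, RoFormerModel
#   model = RoFormerModel(RoFormerConfig())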
"""simple docstring"""
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
_UpperCamelCase = ["""text""", """image""", """audio"""]
def _a ( _snake_case ):
"""simple docstring"""
UpperCAmelCase = []
for input_type in input_types:
if input_type == "text":
inputs.append("""Text input""" )
elif input_type == "image":
inputs.append(
Image.open(Path(get_tests_dir("""fixtures/tests_samples/COCO""" ) ) / """000000039769.png""" ).resize((512, 512) ) )
elif input_type == "audio":
inputs.append(torch.ones(3000 ) )
elif isinstance(_snake_case , _snake_case ):
inputs.append(create_inputs(_snake_case ) )
else:
raise ValueError(F'''Invalid type requested: {input_type}''' )
return inputs
def _a ( _snake_case ):
"""simple docstring"""
UpperCAmelCase = []
for output in outputs:
if isinstance(_snake_case , (str, AgentText) ):
output_types.append("""text""" )
elif isinstance(_snake_case , (Image.Image, AgentImage) ):
output_types.append("""image""" )
elif isinstance(_snake_case , (torch.Tensor, AgentAudio) ):
output_types.append("""audio""" )
else:
raise ValueError(F'''Invalid output: {output}''' )
return output_types
@is_tool_test
class lowerCamelCase__ :
def _UpperCamelCase ( self ):
self.assertTrue(hasattr(self.tool ,"""inputs""" ) )
self.assertTrue(hasattr(self.tool ,"""outputs""" ) )
UpperCAmelCase = self.tool.inputs
for _input in inputs:
if isinstance(_input ,A ):
for __input in _input:
self.assertTrue(__input in authorized_types )
else:
self.assertTrue(_input in authorized_types )
UpperCAmelCase = self.tool.outputs
for _output in outputs:
self.assertTrue(_output in authorized_types )
def _UpperCamelCase ( self ):
UpperCAmelCase = create_inputs(self.tool.inputs )
UpperCAmelCase = self.tool(*A )
# There is a single output
if len(self.tool.outputs ) == 1:
UpperCAmelCase = [outputs]
self.assertListEqual(output_types(A ) ,self.tool.outputs )
def _UpperCamelCase ( self ):
self.assertTrue(hasattr(self.tool ,"""description""" ) )
self.assertTrue(hasattr(self.tool ,"""default_checkpoint""" ) )
self.assertTrue(self.tool.description.startswith("""This is a tool that""" ) )
def _UpperCamelCase ( self ):
UpperCAmelCase = create_inputs(self.tool.inputs )
UpperCAmelCase = self.tool(*A )
if not isinstance(A ,A ):
UpperCAmelCase = [outputs]
self.assertEqual(len(A ) ,len(self.tool.outputs ) )
for output, output_type in zip(A ,self.tool.outputs ):
UpperCAmelCase = AGENT_TYPE_MAPPING[output_type]
self.assertTrue(isinstance(A ,A ) )
def _UpperCamelCase ( self ):
UpperCAmelCase = create_inputs(self.tool.inputs )
UpperCAmelCase = []
for _input, input_type in zip(A ,self.tool.inputs ):
if isinstance(A ,A ):
_inputs.append([AGENT_TYPE_MAPPING[_input_type](_input ) for _input_type in input_type] )
else:
_inputs.append(AGENT_TYPE_MAPPING[input_type](_input ) )
# Should not raise an error
UpperCAmelCase = self.tool(*A )
if not isinstance(A ,A ):
UpperCAmelCase = [outputs]
self.assertEqual(len(A ) ,len(self.tool.outputs ) )
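# Usage sketch (illustrative): a concrete test combines the mixin with
# unittest.TestCase and supplies a `tool` attribute in setUp (the tool class
# below is hypothetical):
#
#   class MyToolTester(ToolTesterMixin, unittest.TestCase):
#       def setUp(self):
#           self.tool = MyTool()
#           self.tool.setup()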
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import convert_to_rgb, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
class BlipImageProcessor(BaseImageProcessor):
    r"""Constructs a BLIP image processor."""

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 384, "width": 384}
        size = get_size_dict(size, default_to_square=True)

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=True)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}")
        output_size = (size["height"], size["width"])
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        do_convert_rgb: bool = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=True)

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        encoded_outputs = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors)
        return encoded_outputs
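# Usage sketch (illustrative): preprocess() (also reachable via __call__) accepts
# PIL images, numpy arrays, or tensors and returns a BatchFeature holding
# "pixel_values":
#
#   from PIL import Image
#   processor = BlipImageProcessor()
#   image = Image.new("RGB", (640, 480))
#   pixel_values = processor(images=image, return_tensors="np")["pixel_values"]
#   # (1, 3, 384, 384) with the default 384x384 size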
import math


def real_power(apparent_power: float, power_factor: float) -> float:
    """Calculate real power from apparent power and power factor."""
    if (
        not isinstance(power_factor, (int, float))
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError("power_factor must be a valid float value between -1 and 1.")
    return apparent_power * power_factor


def reactive_power(apparent_power: float, power_factor: float) -> float:
    """Calculate reactive power from apparent power and power factor."""
    if (
        not isinstance(power_factor, (int, float))
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError("power_factor must be a valid float value between -1 and 1.")
    return apparent_power * math.sqrt(1 - power_factor**2)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
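# Usage sketch (illustrative): for an apparent power S of 100 VA at power factor
# 0.9, the components satisfy S^2 = P^2 + Q^2:
if __name__ == "__main__":
    s, pf = 100.0, 0.9
    p = real_power(s, pf)      # 90.0 W
    q = reactive_power(s, pf)  # ~43.59 var
    assert abs(p**2 + q**2 - s**2) < 1e-6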
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class ShapEPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = ShapEPipeline
    params = ["prompt"]
    batch_params = ["prompt"]
    required_optional_params = [
        "num_images_per_prompt",
        "num_inference_steps",
        "generator",
        "latents",
        "guidance_scale",
        "frame_size",
        "output_type",
        "return_dict",
    ]
    test_gpu_offload = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def renderer_dim(self):
        return 8

    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=self.text_embedder_hidden_size,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModelWithProjection(config)

    @property
    def dummy_prior(self):
        torch.manual_seed(0)

        model_kwargs = {
            "num_attention_heads": 2,
            "attention_head_dim": 16,
            "embedding_dim": self.time_input_dim,
            "num_embeddings": 32,
            "embedding_proj_dim": self.text_embedder_hidden_size,
            "time_embed_dim": self.time_embed_dim,
            "num_layers": 1,
            "clip_embed_dim": self.time_input_dim * 2,
            "additional_embeddings": 0,
            "time_embed_act_fn": "gelu",
            "norm_in_type": "layer",
            "encoder_hid_proj_type": None,
            "added_emb_type": None,
        }

        model = PriorTransformer(**model_kwargs)
        return model

    @property
    def dummy_renderer(self):
        torch.manual_seed(0)

        model_kwargs = {
            "param_shapes": (
                (self.renderer_dim, 93),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
            ),
            "d_latent": self.time_input_dim,
            "d_hidden": self.renderer_dim,
            "n_output": 12,
            "background": (
                0.1,
                0.1,
                0.1,
            ),
        }
        model = ShapERenderer(**model_kwargs)
        return model

    def get_dummy_components(self):
        prior = self.dummy_prior
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        renderer = self.dummy_renderer

        scheduler = HeunDiscreteScheduler(
            beta_schedule="exp",
            num_train_timesteps=1024,
            prediction_type="sample",
            use_karras_sigmas=True,
            clip_sample=True,
            clip_sample_range=1.0,
        )
        components = {
            "prior": prior,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "renderer": renderer,
            "scheduler": scheduler,
        }

        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "generator": generator,
            "num_inference_steps": 1,
            "frame_size": 32,
            "output_type": "np",
        }
        return inputs

    def test_shap_e(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images[0]
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (20, 32, 32, 3)

        expected_slice = np.array(
            [
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
            ]
        )

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_batch_consistent(self):
        self._test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True

        self._test_inference_batch_single_identical(
            batch_size=2,
            test_max_difference=test_max_difference,
            relax_max_difference=relax_max_difference,
        )

    def test_num_images_per_prompt(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        batch_size = 1
        num_images_per_prompt = 2

        inputs = self.get_dummy_inputs(torch_device)

        for key in inputs.keys():
            if key in self.batch_params:
                inputs[key] = batch_size * [inputs[key]]

        images = pipe(**inputs, num_images_per_prompt=num_images_per_prompt)[0]

        assert images.shape[0] == batch_size * num_images_per_prompt


@slow
@require_torch_gpu
class ShapEPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_shap_e(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/shap_e/test_shap_e_np_out.npy"
        )
        pipe = ShapEPipeline.from_pretrained("openai/shap-e")
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=torch_device).manual_seed(0)

        images = pipe(
            "a shark",
            generator=generator,
            guidance_scale=15.0,
            num_inference_steps=64,
            frame_size=64,
            output_type="np",
        ).images[0]

        assert images.shape == (20, 64, 64, 3)

        assert_mean_pixel_difference(images, expected_image)
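# Usage sketch (illustrative; the file path is an example): the fast tests above
# run on CPU and can be selected with pytest's -k filter:
#
#   pytest tests/pipelines/shap_e/test_shap_e.py -k ShapEPipelineFastTests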
import re
import tempfile
from pathlib import Path
import pytest
import yaml
from datasets.utils.readme import ReadMe
# @pytest.fixture
# def example_yaml_structure():
example_yaml_structure = yaml.safe_load(
'''\
name: ""
allow_empty: false
allow_empty_text: true
subsections:
- name: "Dataset Card for X" # First-level markdown heading
allow_empty: false
allow_empty_text: true
subsections:
- name: "Table of Contents"
allow_empty: false
allow_empty_text: false
subsections: null
- name: "Dataset Description"
allow_empty: false
allow_empty_text: false
subsections:
- name: "Dataset Summary"
allow_empty: false
allow_empty_text: false
subsections: null
- name: "Supported Tasks and Leaderboards"
allow_empty: true
allow_empty_text: true
subsections: null
- name: Languages
allow_empty: false
allow_empty_text: true
subsections: null
'''
)
CORRECT_DICT = {
'''name''': '''root''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [
{
'''name''': '''Dataset Card for My Dataset''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [
{'''name''': '''Table of Contents''', '''text''': '''Some text here.''', '''is_empty_text''': False, '''subsections''': []},
{
'''name''': '''Dataset Description''',
'''text''': '''Some text here.''',
'''is_empty_text''': False,
'''subsections''': [
{
'''name''': '''Dataset Summary''',
'''text''': '''Some text here.''',
'''is_empty_text''': False,
'''subsections''': [],
},
{
'''name''': '''Supported Tasks and Leaderboards''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [],
},
{'''name''': '''Languages''', '''text''': '''Language Text''', '''is_empty_text''': False, '''subsections''': []},
],
},
],
}
],
}
README_CORRECT = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
README_CORRECT_FOUR_LEVEL = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
#### Extra Ignored Subsection
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
CORRECT_DICT_FOUR_LEVEL = {
'''name''': '''root''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [
{
'''name''': '''Dataset Card for My Dataset''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [
{'''name''': '''Table of Contents''', '''text''': '''Some text here.''', '''is_empty_text''': False, '''subsections''': []},
{
'''name''': '''Dataset Description''',
'''text''': '''Some text here.''',
'''is_empty_text''': False,
'''subsections''': [
{
'''name''': '''Dataset Summary''',
'''text''': '''Some text here.''',
'''is_empty_text''': False,
'''subsections''': [
{
'''name''': '''Extra Ignored Subsection''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [],
}
],
},
{
'''name''': '''Supported Tasks and Leaderboards''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [],
},
{'''name''': '''Languages''', '''text''': '''Language Text''', '''is_empty_text''': False, '''subsections''': []},
],
},
],
}
],
}
README_EMPTY_YAML = '''\
---
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
EXPECTED_ERROR_README_EMPTY_YAML = (
    '''The following issues were found for the README at `{path}`:\n-\tEmpty YAML markers are present in the README.'''
)
README_NO_YAML = '''\
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
EXPECTED_ERROR_README_NO_YAML = (
    '''The following issues were found for the README at `{path}`:\n-\tNo YAML markers are present in the README.'''
)
README_INCORRECT_YAML = '''\
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
EXPECTED_ERROR_README_INCORRECT_YAML = '''The following issues were found for the README at `{path}`:\n-\tOnly the start of YAML tags present in the README.'''
README_MISSING_TEXT = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
A_ :List[Any] = '''The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Summary` but it is empty.\n-\tExpected some text in section `Dataset Summary` but it is empty (text in subsections are ignored).'''
README_NONE_SUBSECTION = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
'''
EXPECTED_ERROR_README_NONE_SUBSECTION = '''The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Card for My Dataset` but it is empty.\n-\tSection `Dataset Card for My Dataset` expected the following subsections: `Table of Contents`, `Dataset Description`. Found \'None\'.'''
README_MISSING_SUBSECTION = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Languages
Language Text
'''
EXPECTED_ERROR_README_MISSING_SUBSECTION = '''The following issues were found for the README at `{path}`:\n-\tSection `Dataset Description` is missing subsection: `Supported Tasks and Leaderboards`.'''
README_MISSING_CONTENT = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
'''
EXPECTED_ERROR_README_MISSING_CONTENT = '''The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Languages` but it is empty.'''
README_MISSING_FIRST_LEVEL = '''\
---
language:
- zh
- en
---
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
EXPECTED_ERROR_README_MISSING_FIRST_LEVEL = '''The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.'''
README_MULTIPLE_WRONG_FIRST_LEVEL = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
# Dataset Card My Dataset
'''
EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL = '''The following issues were found for the README at `{path}`:\n-\tThe README has several first-level headings: `Dataset Card for My Dataset`, `Dataset Card My Dataset`. Only one heading is expected. Skipping further validation for this README.'''
README_WRONG_FIRST_LEVEL = '''\
---
language:
- zh
- en
---
# Dataset Card My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
EXPECTED_ERROR_README_WRONG_FIRST_LEVEL = '''The following issues were found for the README at `{path}`:\n-\tNo first-level heading starting with `Dataset Card for` found in README. Skipping further validation for this README.'''
README_EMPTY = ''''''
EXPECTED_ERROR_README_EMPTY = '''The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.\n-\tNo YAML markers are present in the README.'''
README_MULTIPLE_SAME_HEADING_1 = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1 = '''The following issues were found while parsing the README at `{path}`:\n-\tMultiple sections with the same heading `Dataset Card for My Dataset` have been found. Please keep only one of these sections.'''
@pytest.mark.parametrize(
'readme_md, expected_dict' ,[
(README_CORRECT, CORRECT_DICT),
(README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
] ,)
def test_readme_from_string_correct(readme_md, expected_dict):
    assert ReadMe.from_string(readme_md, example_yaml_structure).to_dict() == expected_dict
@pytest.mark.parametrize(
'readme_md, expected_error' ,[
(README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
(README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
(README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
(README_EMPTY, EXPECTED_ERROR_README_EMPTY),
(README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
(README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
(README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
(README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
(README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
(README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
(README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
] ,)
def test_readme_from_string_validation_errors(readme_md, expected_error):
    with pytest.raises(ValueError, match=re.escape(expected_error.format(path='root'))):
        readme = ReadMe.from_string(readme_md, example_yaml_structure)
        readme.validate()
@pytest.mark.parametrize(
'readme_md, expected_error' ,[
(README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
] ,)
def test_readme_from_string_parsing_errors(readme_md, expected_error):
    with pytest.raises(ValueError, match=re.escape(expected_error.format(path='root'))):
        ReadMe.from_string(readme_md, example_yaml_structure)
@pytest.mark.parametrize(
'readme_md,' ,[
(README_MULTIPLE_SAME_HEADING_1),
] ,)
def test_readme_from_string_suppress_parsing_errors(readme_md):
    ReadMe.from_string(readme_md, example_yaml_structure, suppress_parsing_errors=True)
@pytest.mark.parametrize(
'readme_md, expected_dict' ,[
(README_CORRECT, CORRECT_DICT),
(README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
] ,)
def test_readme_from_readme_correct(readme_md, expected_dict):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / 'README.md'
        with open(path, 'w+') as readme_file:
            readme_file.write(readme_md)
        out = ReadMe.from_readme(path, example_yaml_structure).to_dict()
        assert out["name"] == path
        assert out["text"] == ""
        assert out["is_empty_text"]
        assert out["subsections"] == expected_dict["subsections"]
@pytest.mark.parametrize(
'readme_md, expected_error' ,[
(README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
(README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
(README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
(README_EMPTY, EXPECTED_ERROR_README_EMPTY),
(README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
(README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
(README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
(README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
(README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
(README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
(README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
] ,)
def test_readme_from_readme_error(readme_md, expected_error):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / 'README.md'
        with open(path, 'w+') as readme_file:
            readme_file.write(readme_md)
        expected_error = expected_error.format(path=path)
        with pytest.raises(ValueError, match=re.escape(expected_error)):
            readme = ReadMe.from_readme(path, example_yaml_structure)
            readme.validate()
@pytest.mark.parametrize(
'readme_md, expected_error' ,[
(README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
] ,)
def test_readme_from_readme_parsing_errors(readme_md, expected_error):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / 'README.md'
        with open(path, 'w+') as readme_file:
            readme_file.write(readme_md)
        expected_error = expected_error.format(path=path)
        with pytest.raises(ValueError, match=re.escape(expected_error)):
            ReadMe.from_readme(path, example_yaml_structure)
@pytest.mark.parametrize(
'readme_md,' ,[
(README_MULTIPLE_SAME_HEADING_1),
] ,)
def test_readme_from_readme_suppress_parsing_errors(readme_md):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / 'README.md'
        with open(path, 'w+') as readme_file:
            readme_file.write(readme_md)
        ReadMe.from_readme(path, example_yaml_structure, suppress_parsing_errors=True)
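

# Usage sketch (not part of the original suite): drive the same API by hand.
# Assumes `ReadMe`, `example_yaml_structure`, README_CORRECT and CORRECT_DICT are
# defined earlier in this file, as the parametrized tests above already require.
if __name__ == "__main__":
    readme = ReadMe.from_string(README_CORRECT, example_yaml_structure)
    assert readme.to_dict() == CORRECT_DICT
    readme.validate()  # raises ValueError with a per-issue report when malformed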
| 245 | 1 |
"""simple docstring"""
import logging
import math
from functools import partial
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union
import torch
from .tensor_utils import tensor_tree_map, tree_map
def _fetch_dims(tree: Union[dict, list, tuple, torch.Tensor]) -> List[Tuple[int, ...]]:
    """Collect the shapes of every tensor leaf in a nested container."""
    shapes = []
    if isinstance(tree, dict):
        for v in tree.values():
            shapes.extend(_fetch_dims(v))
    elif isinstance(tree, (list, tuple)):
        for t in tree:
            shapes.extend(_fetch_dims(t))
    elif isinstance(tree, torch.Tensor):
        shapes.append(tree.shape)
    else:
        raise ValueError('Not supported')
    return shapes
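

# Quick sanity check (a sketch, not in the original module): _fetch_dims walks an
# arbitrarily nested dict/list/tuple tree and collects the shape of every tensor leaf.
if __name__ == "__main__":
    example_tree = {"a": torch.zeros(2, 3), "b": [torch.zeros(4), (torch.zeros(5, 6),)]}
    assert _fetch_dims(example_tree) == [torch.Size([2, 3]), torch.Size([4]), torch.Size([5, 6])]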
@torch.jit.ignore
def _flat_idx_to_idx(flat_idx: int, dims: Tuple[int, ...]) -> Tuple[int, ...]:
    """Convert a flat (row-major) index into a multi-dimensional index."""
    idx = []
    for d in reversed(dims):
        idx.append(flat_idx % d)
        flat_idx = flat_idx // d
    return tuple(reversed(idx))
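

# Round-trip check (sketch): flat indices follow C-contiguous (row-major) layout,
# so flat index 57 in a (4, 5, 6) grid is (1, 4, 3) because 1*30 + 4*6 + 3 == 57.
if __name__ == "__main__":
    assert _flat_idx_to_idx(57, (4, 5, 6)) == (1, 4, 3)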
@torch.jit.ignore
def _get_minimal_slice_set(
    start: Sequence[int],
    end: Sequence[int],
    dims: Sequence[int],
    start_edges: Optional[Sequence[bool]] = None,
    end_edges: Optional[Sequence[bool]] = None,
) -> List[Tuple[slice, ...]]:
    """Produce an ordered, minimal list of slices covering [start, end] (inclusive)."""

    # start_edges/end_edges indicate whether, from a given dimension on, the
    # start/end index sits at the top/bottom edge of the corresponding subtree
    def reduce_edge_list(l: List[bool]) -> None:
        tally = True
        for i in range(len(l)):
            reversed_idx = -1 * (i + 1)
            l[reversed_idx] &= tally
            tally = l[reversed_idx]

    if start_edges is None:
        start_edges = [s == 0 for s in start]
        reduce_edge_list(start_edges)
    if end_edges is None:
        end_edges = [e == (d - 1) for e, d in zip(end, dims)]
        reduce_edge_list(end_edges)

    # Base cases. Either start/end are empty and we're done, or the final,
    # one-dimensional tensor can be simply sliced
    if len(start) == 0:
        return [()]
    elif len(start) == 1:
        return [(slice(start[0], end[0] + 1),)]

    slices = []
    path_list = []

    # Dimensions common to start and end can be selected directly
    for s, e in zip(start, end):
        if s == e:
            path_list.append(slice(s, s + 1))
        else:
            break

    path = tuple(path_list)
    divergence_idx = len(path_list)

    # start == end, and we're done
    if divergence_idx == len(dims):
        return [path]

    def upper() -> Tuple[Tuple[slice, ...], ...]:
        assert start_edges is not None
        assert end_edges is not None
        sdi = start[divergence_idx]
        return tuple(
            path + (slice(sdi, sdi + 1),) + s
            for s in _get_minimal_slice_set(
                start[divergence_idx + 1 :], [d - 1 for d in dims[divergence_idx + 1 :]], dims[divergence_idx + 1 :], start_edges=start_edges[divergence_idx + 1 :], end_edges=[True for _ in end_edges[divergence_idx + 1 :]], ) )

    def lower() -> Tuple[Tuple[slice, ...], ...]:
        assert start_edges is not None
        assert end_edges is not None
        edi = end[divergence_idx]
        return tuple(
            path + (slice(edi, edi + 1),) + s
            for s in _get_minimal_slice_set(
                [0 for _ in start[divergence_idx + 1 :]], end[divergence_idx + 1 :], dims[divergence_idx + 1 :], start_edges=[True for _ in start_edges[divergence_idx + 1 :]], end_edges=end_edges[divergence_idx + 1 :], ) )

    # If both start and end are at the edges of the subtree rooted at
    # divergence_idx, we can just select the whole subtree at once
    if start_edges[divergence_idx] and end_edges[divergence_idx]:
        slices.append(path + (slice(start[divergence_idx], end[divergence_idx] + 1),))
    # If just start is at the edge, we can grab almost all of the subtree,
    # treating only the ragged bottom edge as an edge case
    elif start_edges[divergence_idx]:
        slices.append(path + (slice(start[divergence_idx], end[divergence_idx]),))
        slices.extend(lower())
    # Analogous to the previous case, but the top is ragged this time
    elif end_edges[divergence_idx]:
        slices.extend(upper())
        slices.append(path + (slice(start[divergence_idx] + 1, end[divergence_idx] + 1),))
    # If both sides of the range are ragged, we need to handle both sides
    # separately. If there's contiguous meat in between them, we can index it
    # in one big chunk
    else:
        slices.extend(upper())
        middle_ground = end[divergence_idx] - start[divergence_idx]
        if middle_ground > 1:
            slices.append(path + (slice(start[divergence_idx] + 1, end[divergence_idx]),))
        slices.extend(lower())

    return slices
@torch.jit.ignore
def _chunk_slice(t: torch.Tensor, flat_start: int, flat_end: int, no_batch_dims: int) -> torch.Tensor:
    """Slice rows [flat_start, flat_end) of the flattened leading batch dims, without copying everything."""
    batch_dims = t.shape[:no_batch_dims]
    start_idx = list(_flat_idx_to_idx(flat_start, batch_dims))
    # _get_minimal_slice_set is inclusive
    end_idx = list(_flat_idx_to_idx(flat_end - 1, batch_dims))

    # Get an ordered list of slices to perform
    slices = _get_minimal_slice_set(start_idx, end_idx, batch_dims)
    sliced_tensors = [t[s] for s in slices]
    return torch.cat([s.view((-1,) + t.shape[no_batch_dims:]) for s in sliced_tensors])
def chunk_layer(
    layer: Callable,
    inputs: Dict[str, Any],
    chunk_size: int,
    no_batch_dims: int,
    low_mem: bool = False,
    _out: Any = None,
    _add_into_out: bool = False,
) -> Any:
    """Apply `layer` over the flattened leading batch dims of `inputs`, `chunk_size` rows at a time."""
    if not (len(inputs) > 0):
        raise ValueError('Must provide at least one input')

    initial_dims = [shape[:no_batch_dims] for shape in _fetch_dims(inputs)]
    orig_batch_dims = tuple([max(s) for s in zip(*initial_dims)])

    def _prep_inputs(t: torch.Tensor) -> torch.Tensor:
        if not low_mem:
            if not sum(t.shape[:no_batch_dims]) == no_batch_dims:
                t = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
            t = t.reshape(-1, *t.shape[no_batch_dims:])
        else:
            t = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
        return t

    prepped_inputs = tensor_tree_map(_prep_inputs, inputs)
    prepped_outputs = None
    if _out is not None:
        prepped_outputs = tensor_tree_map(lambda t: t.view([-1] + list(t.shape[no_batch_dims:])), _out)

    flat_batch_dim = 1
    for d in orig_batch_dims:
        flat_batch_dim *= d

    no_chunks = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0)

    def _select_chunk(t: torch.Tensor) -> torch.Tensor:
        return t[i : i + chunk_size] if t.shape[0] != 1 else t

    i = 0
    out = prepped_outputs
    for _ in range(no_chunks):
        # Chunk the input
        if not low_mem:
            select_chunk = _select_chunk
        else:
            select_chunk = partial(
                _chunk_slice, flat_start=i, flat_end=min(flat_batch_dim, i + chunk_size), no_batch_dims=len(orig_batch_dims), )

        chunks = tensor_tree_map(select_chunk, prepped_inputs)

        # Run the layer on the chunk
        output_chunk = layer(**chunks)

        # Allocate space for the output
        if out is None:
            out = tensor_tree_map(lambda t: t.new_zeros((flat_batch_dim,) + t.shape[1:]), output_chunk)

        # Put the chunk in its pre-allocated space
        if isinstance(output_chunk, dict):

            def assign(d1: dict, d2: dict) -> None:
                for k, v in d1.items():
                    if isinstance(v, dict):
                        assign(v, d2[k])
                    else:
                        if _add_into_out:
                            v[i : i + chunk_size] += d2[k]
                        else:
                            v[i : i + chunk_size] = d2[k]

            assign(out, output_chunk)
        elif isinstance(output_chunk, tuple):
            for x1, x2 in zip(out, output_chunk):
                if _add_into_out:
                    x1[i : i + chunk_size] += x2
                else:
                    x1[i : i + chunk_size] = x2
        elif isinstance(output_chunk, torch.Tensor):
            if _add_into_out:
                out[i : i + chunk_size] += output_chunk
            else:
                out[i : i + chunk_size] = output_chunk
        else:
            raise ValueError('Not supported')

        i += chunk_size

    out = tensor_tree_map(lambda t: t.view(orig_batch_dims + t.shape[1:]), out)
    return out
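

# Usage sketch for chunk_layer (illustrative only; `toy_layer` is hypothetical):
# run a layer over a large batch in fixed-size chunks to bound peak memory.
def toy_layer(x: torch.Tensor, y: torch.Tensor) -> Dict[str, torch.Tensor]:
    return {"sum": x + y, "prod": x * y}


if __name__ == "__main__":
    x = torch.randn(8, 16, 32)
    y = torch.randn(8, 16, 32)
    # The leading (8, 16) dims are flattened to 128 rows and processed 64 at a time.
    out = chunk_layer(toy_layer, {"x": x, "y": y}, chunk_size=64, no_batch_dims=2)
    assert out["sum"].shape == (8, 16, 32)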
class ChunkSizeTuner:
    def __init__(self, max_chunk_size: int = 512) -> None:
        self.max_chunk_size = max_chunk_size
        self.cached_chunk_size: Optional[int] = None
        self.cached_arg_data: Optional[tuple] = None

    def _determine_favorable_chunk_size(self, fn, args, min_chunk_size):
        logging.info('Tuning chunk size...')
        if min_chunk_size >= self.max_chunk_size:
            return min_chunk_size
        candidates = [2**l for l in range(int(math.log(self.max_chunk_size, 2)) + 1)]
        candidates = [c for c in candidates if c > min_chunk_size]
        candidates = [min_chunk_size] + candidates
        candidates[-1] += 4

        def test_chunk_size(chunk_size: int) -> bool:
            try:
                with torch.no_grad():
                    fn(*args, chunk_size=chunk_size)
                return True
            except RuntimeError:
                return False

        min_viable_chunk_size_index = 0
        i = len(candidates) - 1
        while i > min_viable_chunk_size_index:
            viable = test_chunk_size(candidates[i])
            if not viable:
                i = (min_viable_chunk_size_index + i) // 2
            else:
                min_viable_chunk_size_index = i
                i = (i + len(candidates) - 1) // 2

        return candidates[min_viable_chunk_size_index]

    def _compare_arg_caches(self, ac1, ac2) -> bool:
        consistent = True
        for a1, a2 in zip(ac1, ac2):
            assert type(a1) == type(a2)
            if isinstance(a1, (list, tuple)):
                consistent &= self._compare_arg_caches(a1, a2)
            elif isinstance(a1, dict):
                a1_items = [v for _, v in sorted(a1.items(), key=lambda x: x[0])]
                a2_items = [v for _, v in sorted(a2.items(), key=lambda x: x[0])]
                consistent &= self._compare_arg_caches(a1_items, a2_items)
            else:
                consistent &= a1 == a2
        return consistent

    def tune_chunk_size(self, representative_fn, args, min_chunk_size) -> int:
        consistent = True
        arg_data = tree_map(lambda a: a.shape if isinstance(a, torch.Tensor) else a, args, object)
        if self.cached_arg_data is not None:
            # If args have changed shape/value, we need to re-tune
            assert len(self.cached_arg_data) == len(arg_data)
            consistent = self._compare_arg_caches(self.cached_arg_data, arg_data)
        else:
            # Otherwise, we can reuse the precomputed value
            consistent = False
        if not consistent:
            self.cached_chunk_size = self._determine_favorable_chunk_size(
                representative_fn, args, min_chunk_size, )
            self.cached_arg_data = arg_data
        assert self.cached_chunk_size is not None
        return self.cached_chunk_size
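

# Usage sketch (hypothetical `run_attention`): probe for the largest chunk size
# that fits in memory; the result is cached until the argument shapes change.
if __name__ == "__main__":
    tuner = ChunkSizeTuner(max_chunk_size=512)

    def run_attention(q: torch.Tensor, chunk_size: int) -> torch.Tensor:
        return q * 2  # stand-in for a real chunked kernel

    q = torch.randn(64, 128)
    best = tuner.tune_chunk_size(run_attention, (q,), min_chunk_size=16)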
| 77 |
'''simple docstring'''
import copy
import re
class TrialShortNamer:
    PREFIX = 'hp'
    DEFAULTS = {}
    NAMING_INFO = None

    @classmethod
    def set_defaults(cls, prefix, defaults):
        cls.PREFIX = prefix
        cls.DEFAULTS = defaults
        cls.build_naming_info()

    @staticmethod
    def shortname_for_word(info, word):
        if len(word) == 0:
            return ""
        short_word = None
        if any(char.isdigit() for char in word):
            raise Exception(f"Parameters should not contain numbers: '{word}' contains a number")
        if word in info["short_word"]:
            return info["short_word"][word]
        for prefix_len in range(1, len(word) + 1):
            prefix = word[:prefix_len]
            if prefix in info["reverse_short_word"]:
                continue
            else:
                short_word = prefix
                break
        if short_word is None:
            # Paranoid fallback
            def int_to_alphabetic(integer):
                s = ''
                while integer != 0:
                    s = chr(ord('A') + integer % 10) + s
                    integer //= 10
                return s

            i = 0
            while True:
                sword = word + '#' + int_to_alphabetic(i)
                if sword in info["reverse_short_word"]:
                    continue
                else:
                    short_word = sword
                    break
        info["short_word"][word] = short_word
        info["reverse_short_word"][short_word] = word
        return short_word

    @staticmethod
    def shortname_for_key(info, param_name):
        words = param_name.split('_')
        shortname_parts = [TrialShortNamer.shortname_for_word(info, word) for word in words]
        # We try to create a separatorless short name, but if there is a collision we have to fallback
        # to a separated short name
        separators = ['', '_']
        for separator in separators:
            shortname = separator.join(shortname_parts)
            if shortname not in info["reverse_short_param"]:
                info["short_param"][param_name] = shortname
                info["reverse_short_param"][shortname] = param_name
                return shortname
        return param_name

    @staticmethod
    def add_new_param_name(info, param_name):
        short_name = TrialShortNamer.shortname_for_key(info, param_name)
        info["short_param"][param_name] = short_name
        info["reverse_short_param"][short_name] = param_name

    @classmethod
    def build_naming_info(cls):
        if cls.NAMING_INFO is not None:
            return
        info = {
            'short_word': {},
            'reverse_short_word': {},
            'short_param': {},
            'reverse_short_param': {},
        }
        field_keys = list(cls.DEFAULTS.keys())
        for k in field_keys:
            cls.add_new_param_name(info, k)
        cls.NAMING_INFO = info

    @classmethod
    def shortname(cls, params):
        cls.build_naming_info()
        assert cls.PREFIX is not None
        name = [copy.copy(cls.PREFIX)]
        for k, v in params.items():
            if k not in cls.DEFAULTS:
                raise Exception(f"You should provide a default value for the param name {k} with value {v}")
            if v == cls.DEFAULTS[k]:
                # The default value is not added to the name
                continue
            key = cls.NAMING_INFO['short_param'][k]
            if isinstance(v, bool):
                v = 1 if v else 0
            sep = '' if isinstance(v, (int, float)) else '-'
            e = f"{key}{sep}{v}"
            name.append(e)
        return "_".join(name)

    @classmethod
    def parse_repr(cls, repr):
        repr = repr[len(cls.PREFIX) + 1 :]
        if repr == "":
            values = []
        else:
            values = repr.split('_')
        parameters = {}
        for value in values:
            if "-" in value:
                p_k, p_v = value.split('-')
            else:
                p_k = re.sub('[0-9.]', '', value)
                p_v = float(re.sub('[^0-9.]', '', value))
            key = cls.NAMING_INFO['reverse_short_param'][p_k]
            parameters[key] = p_v
        for k in cls.DEFAULTS:
            if k not in parameters:
                parameters[k] = cls.DEFAULTS[k]
        return parameters
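

# Usage sketch: derive a compact trial name from non-default hyperparameters and
# parse it back. `RunNamer` and its defaults are illustrative, not from the source.
if __name__ == "__main__":
    class RunNamer(TrialShortNamer):
        PREFIX = "run"
        DEFAULTS = {"learning_rate": 3e-4, "batch_size": 32}

    name = RunNamer.shortname({"learning_rate": 1e-4, "batch_size": 32})  # e.g. "run_lr0.0001"
    params = RunNamer.parse_repr(name)
    assert params["batch_size"] == 32  # defaults are filled back in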
| 47 | 0 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)
RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/resnet-50": "https://huggingface.co/microsoft/resnet-50/blob/main/config.json",
}
class ResNetConfig(BackboneConfigMixin, PretrainedConfig):
    '''simple docstring'''
    model_type = "resnet"
    layer_types = ["basic", "bottleneck"]
    def __init__(self, num_channels=3, embedding_size=64, hidden_sizes=[256, 512, 1_024, 2_048], depths=[3, 4, 6, 3], layer_type="bottleneck", hidden_act="relu", downsample_in_first_stage=False, out_features=None, out_indices=None, **kwargs, ):
        super().__init__(**kwargs)
        if layer_type not in self.layer_types:
            raise ValueError(F"layer_type={layer_type} is not one of {','.join(self.layer_types)}")
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.downsample_in_first_stage = downsample_in_first_stage
        self.stage_names = ['stem'] + [F"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names )
class ResNetOnnxConfig(OnnxConfig):
    '''simple docstring'''
    torch_onnx_minimum_version = version.parse("1.11")
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
            ] )
    @property
    def atol_for_validation(self) -> float:
        return 1e-3
| 359 |
import torch
from torch import nn
from transformers import CLIPPreTrainedModel, CLIPVisionModel
from ...models.attention import BasicTransformerBlock
from ...utils import logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class PaintByExampleImageEncoder(CLIPPreTrainedModel):
    '''simple docstring'''
    def __init__(self, config, proj_size=768):
        super().__init__(config)
        self.proj_size = proj_size
        self.model = CLIPVisionModel(config)
        self.mapper = PaintByExampleMapper(config)
        self.final_layer_norm = nn.LayerNorm(config.hidden_size)
        self.proj_out = nn.Linear(config.hidden_size, self.proj_size)
        # uncondition for scaling
        self.uncond_vector = nn.Parameter(torch.randn((1, 1, self.proj_size)))
    def forward(self, pixel_values, return_uncond_vector=False):
        clip_output = self.model(pixel_values=pixel_values)
        latent_states = clip_output.pooler_output
        latent_states = self.mapper(latent_states[:, None])
        latent_states = self.final_layer_norm(latent_states)
        latent_states = self.proj_out(latent_states)
        if return_uncond_vector:
            return latent_states, self.uncond_vector
        return latent_states
class PaintByExampleMapper(nn.Module):
    '''simple docstring'''
    def __init__(self, config):
        super().__init__()
        num_layers = (config.num_hidden_layers + 1) // 5
        hid_size = config.hidden_size
        num_heads = 1
        self.blocks = nn.ModuleList(
            [
                BasicTransformerBlock(hid_size, num_heads, hid_size // num_heads, activation_fn='gelu', attention_bias=True)
                for _ in range(num_layers)
            ] )
    def forward(self, hidden_states):
        for block in self.blocks:
            hidden_states = block(hidden_states)
        return hidden_states
| 210 | 0 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
SEW_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'asapp/sew-tiny-100k': 'https://huggingface.co/asapp/sew-tiny-100k/resolve/main/config.json',
# See all SEW models at https://huggingface.co/models?filter=sew
}
class SEWConfig(PretrainedConfig):
    '''simple docstring'''
    model_type = '''sew'''
    def __init__(self, vocab_size=32, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, squeeze_factor=2, hidden_act="gelu", hidden_dropout=0.1, activation_dropout=0.1, attention_dropout=0.1, feat_proj_dropout=0.0, final_dropout=0.1, layerdrop=0.1, initializer_range=0.02, layer_norm_eps=1e-5, feat_extract_norm="group", feat_extract_activation="gelu", conv_dim=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512), conv_stride=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1), conv_kernel=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1), conv_bias=False, num_conv_pos_embeddings=128, num_conv_pos_embedding_groups=16, apply_spec_augment=True, mask_time_prob=0.05, mask_time_length=10, mask_time_min_masks=2, mask_feature_prob=0.0, mask_feature_length=10, mask_feature_min_masks=0, ctc_loss_reduction="mean", ctc_zero_infinity=False, use_weighted_layer_sum=False, classifier_proj_size=256, pad_token_id=0, bos_token_id=1, eos_token_id=2, **kwargs, ):
        """simple docstring"""
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.squeeze_factor = squeeze_factor
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                '''Configuration for convolutional layers is incorrect.'''
                '''It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,'''
                F"""but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)"""
                F"""= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity
        # sequence classification
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size
    @property
    def inputs_to_logits_ratio(self):
        """simple docstring"""
        return functools.reduce(operator.mul, self.conv_stride, 1)
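

# Quick check (sketch): the conv front end downsamples the input waveform by the
# product of the conv strides, which `inputs_to_logits_ratio` exposes.
if __name__ == "__main__":
    config = SEWConfig()
    assert config.inputs_to_logits_ratio == 320  # 5 * 2**6 from the default strides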
| 121 |
def binary_xor(a: int, b: int) -> str:
    """
    Return the bitwise XOR of two non-negative integers as a binary string.

    >>> binary_xor(25, 32)
    '0b111001'
    """
    if a < 0 or b < 0:
        raise ValueError('''the value of both inputs must be positive''')
    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]  # remove the leading "0b"
    max_len = max(len(a_binary), len(b_binary))
    return "0b" + "".join(
        str(int(char_a != char_b))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len)))
if __name__ == "__main__":
import doctest
doctest.testmod()
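
# Cross-check sketch: the string result agrees with Python's built-in XOR.
if __name__ == "__main__":
    assert int(binary_xor(25, 32), 2) == (25 ^ 32)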
| 121 | 1 |
from typing import Union
import fire
import torch
from tqdm import tqdm
def _lowercase ( _UpperCAmelCase , _UpperCAmelCase = "cpu" , _UpperCAmelCase = None ) -> None:
lowerCamelCase =torch.load(_UpperCAmelCase , map_location=_UpperCAmelCase )
for k, v in tqdm(state_dict.items() ):
if not isinstance(_UpperCAmelCase , torch.Tensor ):
raise TypeError("""FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin""" )
lowerCamelCase =v.half()
if save_path is None: # overwrite src_path
lowerCamelCase =src_path
torch.save(_UpperCAmelCase , _UpperCAmelCase )
if __name__ == "__main__":
fire.Fire(convert)
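
# Self-contained demo (sketch, hypothetical paths): build a tiny fp32 checkpoint,
# convert it, and confirm the tensors are now half precision.
def _demo(tmp_path: str = "tiny_model.bin") -> None:
    torch.save({"w": torch.ones(2, 2)}, tmp_path)
    convert(tmp_path, save_path="tiny_model_fp16.bin")
    assert torch.load("tiny_model_fp16.bin")["w"].dtype == torch.float16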
| 262 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_nllb import NllbTokenizer
else:
    NllbTokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''sentencepiece.bpe.model''', '''tokenizer_file''': '''tokenizer.json'''}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/nllb-200-distilled-600M''': (
'''https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model'''
),
},
'''tokenizer_file''': {
'''facebook/nllb-200-distilled-600M''': (
'''https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''facebook/nllb-large-en-ro''': 10_24,
'''facebook/nllb-200-distilled-600M''': 10_24,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = ['''ace_Arab''', '''ace_Latn''', '''acm_Arab''', '''acq_Arab''', '''aeb_Arab''', '''afr_Latn''', '''ajp_Arab''', '''aka_Latn''', '''amh_Ethi''', '''apc_Arab''', '''arb_Arab''', '''ars_Arab''', '''ary_Arab''', '''arz_Arab''', '''asm_Beng''', '''ast_Latn''', '''awa_Deva''', '''ayr_Latn''', '''azb_Arab''', '''azj_Latn''', '''bak_Cyrl''', '''bam_Latn''', '''ban_Latn''', '''bel_Cyrl''', '''bem_Latn''', '''ben_Beng''', '''bho_Deva''', '''bjn_Arab''', '''bjn_Latn''', '''bod_Tibt''', '''bos_Latn''', '''bug_Latn''', '''bul_Cyrl''', '''cat_Latn''', '''ceb_Latn''', '''ces_Latn''', '''cjk_Latn''', '''ckb_Arab''', '''crh_Latn''', '''cym_Latn''', '''dan_Latn''', '''deu_Latn''', '''dik_Latn''', '''dyu_Latn''', '''dzo_Tibt''', '''ell_Grek''', '''eng_Latn''', '''epo_Latn''', '''est_Latn''', '''eus_Latn''', '''ewe_Latn''', '''fao_Latn''', '''pes_Arab''', '''fij_Latn''', '''fin_Latn''', '''fon_Latn''', '''fra_Latn''', '''fur_Latn''', '''fuv_Latn''', '''gla_Latn''', '''gle_Latn''', '''glg_Latn''', '''grn_Latn''', '''guj_Gujr''', '''hat_Latn''', '''hau_Latn''', '''heb_Hebr''', '''hin_Deva''', '''hne_Deva''', '''hrv_Latn''', '''hun_Latn''', '''hye_Armn''', '''ibo_Latn''', '''ilo_Latn''', '''ind_Latn''', '''isl_Latn''', '''ita_Latn''', '''jav_Latn''', '''jpn_Jpan''', '''kab_Latn''', '''kac_Latn''', '''kam_Latn''', '''kan_Knda''', '''kas_Arab''', '''kas_Deva''', '''kat_Geor''', '''knc_Arab''', '''knc_Latn''', '''kaz_Cyrl''', '''kbp_Latn''', '''kea_Latn''', '''khm_Khmr''', '''kik_Latn''', '''kin_Latn''', '''kir_Cyrl''', '''kmb_Latn''', '''kon_Latn''', '''kor_Hang''', '''kmr_Latn''', '''lao_Laoo''', '''lvs_Latn''', '''lij_Latn''', '''lim_Latn''', '''lin_Latn''', '''lit_Latn''', '''lmo_Latn''', '''ltg_Latn''', '''ltz_Latn''', '''lua_Latn''', '''lug_Latn''', '''luo_Latn''', '''lus_Latn''', '''mag_Deva''', '''mai_Deva''', '''mal_Mlym''', '''mar_Deva''', '''min_Latn''', '''mkd_Cyrl''', '''plt_Latn''', '''mlt_Latn''', '''mni_Beng''', '''khk_Cyrl''', '''mos_Latn''', '''mri_Latn''', '''zsm_Latn''', '''mya_Mymr''', '''nld_Latn''', '''nno_Latn''', '''nob_Latn''', '''npi_Deva''', '''nso_Latn''', '''nus_Latn''', '''nya_Latn''', '''oci_Latn''', '''gaz_Latn''', '''ory_Orya''', '''pag_Latn''', '''pan_Guru''', '''pap_Latn''', '''pol_Latn''', '''por_Latn''', '''prs_Arab''', '''pbt_Arab''', '''quy_Latn''', '''ron_Latn''', '''run_Latn''', '''rus_Cyrl''', '''sag_Latn''', '''san_Deva''', '''sat_Beng''', '''scn_Latn''', '''shn_Mymr''', '''sin_Sinh''', '''slk_Latn''', '''slv_Latn''', '''smo_Latn''', '''sna_Latn''', '''snd_Arab''', '''som_Latn''', '''sot_Latn''', '''spa_Latn''', '''als_Latn''', '''srd_Latn''', '''srp_Cyrl''', '''ssw_Latn''', '''sun_Latn''', '''swe_Latn''', '''swh_Latn''', '''szl_Latn''', '''tam_Taml''', '''tat_Cyrl''', '''tel_Telu''', '''tgk_Cyrl''', '''tgl_Latn''', '''tha_Thai''', '''tir_Ethi''', '''taq_Latn''', '''taq_Tfng''', '''tpi_Latn''', '''tsn_Latn''', '''tso_Latn''', '''tuk_Latn''', '''tum_Latn''', '''tur_Latn''', '''twi_Latn''', '''tzm_Tfng''', '''uig_Arab''', '''ukr_Cyrl''', '''umb_Latn''', '''urd_Arab''', '''uzn_Latn''', '''vec_Latn''', '''vie_Latn''', '''war_Latn''', '''wol_Latn''', '''xho_Latn''', '''ydd_Hebr''', '''yor_Latn''', '''yue_Hant''', '''zho_Hans''', '''zho_Hant''', '''zul_Latn''']
class NllbTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = NllbTokenizer
    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
    def __init__(self, vocab_file=None, tokenizer_file=None, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", src_lang=None, tgt_lang=None, additional_special_tokens=None, legacy_behaviour=False, **kwargs, ):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.legacy_behaviour = legacy_behaviour
        super().__init__(
            vocab_file=vocab_file, tokenizer_file=tokenizer_file, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token, mask_token=mask_token, src_lang=src_lang, tgt_lang=tgt_lang, additional_special_tokens=additional_special_tokens, legacy_behaviour=legacy_behaviour, **kwargs, )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens] )
        self.add_special_tokens({"""additional_special_tokens""": _additional_special_tokens})
        self.lang_code_to_id = {
            lang_code: self.convert_tokens_to_ids(lang_code) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }
        self._src_lang = src_lang if src_lang is not None else """eng_Latn"""
        self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang)
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)
    @property
    def src_lang(self) -> str:
        return self._src_lang
    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def _build_translation_inputs(self, raw_inputs, return_tensors, src_lang, tgt_lang, **extra_kwargs):
        if src_lang is None or tgt_lang is None:
            raise ValueError("""Translation requires a `src_lang` and a `tgt_lang` for this model""")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs
    def prepare_seq2seq_batch(self, src_texts, src_lang="eng_Latn", tgt_texts=None, tgt_lang="fra_Latn", **kwargs, ):
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)
    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)
    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)
    def set_src_lang_special_tokens(self, src_lang) -> None:
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang)
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["""$A"""] + suffix_tokens_str, pair=prefix_tokens_str + ["""$A""", """$B"""] + suffix_tokens_str, special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)), )
    def set_tgt_lang_special_tokens(self, lang) -> None:
        self.cur_lang_code = self.convert_tokens_to_ids(lang)
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["""$A"""] + suffix_tokens_str, pair=prefix_tokens_str + ["""$A""", """$B"""] + suffix_tokens_str, special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)), )
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                """Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
                """tokenizer.""" )
        if not os.path.isdir(save_directory):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory.""")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
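

# Usage sketch (downloads the checkpoint named in the maps above): tokenize English
# input for translation into French; the target language id is what generation
# would use as `forced_bos_token_id`.
if __name__ == "__main__":
    tokenizer = NllbTokenizerFast.from_pretrained(
        "facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="fra_Latn"
    )
    model_inputs = tokenizer("Hello world", return_tensors="pt")
    fra_token_id = tokenizer.convert_tokens_to_ids("fra_Latn")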
| 262 | 1 |
"""simple docstring"""
from math import factorial
def combinations(n: int, k: int) -> int:
    """Return n choose k, the number of k-element subsets of an n-element set."""
    if n < k or k < 0:
        raise ValueError("""Please enter positive integers for n and k where n >= k""")
    return factorial(n) // (factorial(k) * factorial(n - k))
if __name__ == "__main__":
print(
"The number of five-card hands possible from a standard",
F"""fifty-two card deck is: {combinations(52, 5)}\n""",
)
print(
"If a class of 40 students must be arranged into groups of",
F"""4 for group projects, there are {combinations(40, 4)} ways""",
"to arrange them.\n",
)
print(
"If 10 teams are competing in a Formula One race, there",
F"""are {combinations(10, 3)} ways that first, second and""",
"third place can be awarded.",
)
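
if __name__ == "__main__":
    # Symmetry check (sketch): C(n, k) == C(n, n - k), cross-checked against math.comb.
    from math import comb

    assert all(combinations(10, k) == comb(10, k) == combinations(10, 10 - k) for k in range(11))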
| 66 |
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
UpperCAmelCase__ = "\\n@misc{wu2016googles,\n title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n"
UpperCAmelCase__ = "\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe 'GLEU score'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore's range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n"
UpperCAmelCase__ = "\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n predictions (list of str): list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references (list of list of str): list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n 'google_bleu': google_bleu score\n\nExamples:\n Example 1:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.44\n\n Example 2:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.61\n\n Example 3:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 
'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.53\n\n Example 4:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.4\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class GoogleBleu(datasets.Metric):
    def _info(self) -> MetricInfo:
        """simple docstring"""
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
                {
                    'predictions': datasets.Sequence(datasets.Value('string' , id='token') , id='sequence'),
                    'references': datasets.Sequence(
                        datasets.Sequence(datasets.Value('string' , id='token') , id='sequence') , id='references'),
                }) , )
    def _compute(self, references: List[List[List[str]]], predictions: List[List[str]], min_len: int = 1, max_len: int = 4, ) -> Dict[str, float]:
        """simple docstring"""
        return {
            "google_bleu": gleu_score.corpus_gleu(
                list_of_references=references , hypotheses=predictions , min_len=min_len , max_len=max_len)
        }
| 339 | 0 |
"""simple docstring"""
from pathlib import Path
import cv2
import numpy as np
from matplotlib import pyplot as plt
def get_rotation(img: np.ndarray, pt1: np.ndarray, pt2: np.ndarray, rows: int, cols: int) -> np.ndarray:
    rotation_matrix = cv2.getAffineTransform(pt1, pt2)
    return cv2.warpAffine(img, rotation_matrix, (rows, cols))
if __name__ == "__main__":
# read original image
    image = cv2.imread(
        str(Path(__file__).resolve().parent.parent / "image_data" / "lena.jpg")
    )
    # turn image in gray scale value
    gray_img = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # get image shape
    img_rows, img_cols = gray_img.shape
    # set different points to rotate image
    pts1 = np.array([[50, 50], [200, 50], [50, 200]], np.float32)
    pts2 = np.array([[10, 100], [200, 50], [100, 250]], np.float32)
    pts3 = np.array([[50, 50], [150, 50], [120, 200]], np.float32)
    pts4 = np.array([[10, 100], [80, 50], [180, 250]], np.float32)
# add all rotated images in a list
    images = [
        gray_img,
        get_rotation(gray_img, pts1, pts2, img_rows, img_cols),
        get_rotation(gray_img, pts2, pts3, img_rows, img_cols),
        get_rotation(gray_img, pts2, pts4, img_rows, img_cols),
    ]
# plot different image rotations
    fig = plt.figure(1)
    titles = ["Original", "Rotation 1", "Rotation 2", "Rotation 3"]
for i, image in enumerate(images):
plt.subplot(2, 2, i + 1), plt.imshow(image, """gray""")
plt.title(titles[i])
plt.axis("""off""")
plt.subplots_adjust(left=0.0, bottom=0.05, right=1.0, top=0.95)
plt.show()
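
# A related sketch (not in the original): rotation about the image center via
# cv2.getRotationMatrix2D, instead of three-point affine correspondences.
def rotate_about_center(img: np.ndarray, angle_deg: float) -> np.ndarray:
    rows, cols = img.shape[:2]
    center_matrix = cv2.getRotationMatrix2D((cols / 2, rows / 2), angle_deg, 1.0)
    return cv2.warpAffine(img, center_matrix, (cols, rows))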
| 212 |
"""simple docstring"""
import argparse
import glob
import logging
import os
from argparse import Namespace
from importlib import import_module
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from seqeval.metrics import accuracy_score, f1_score, precision_score, recall_score
from torch.nn import CrossEntropyLoss
from torch.utils.data import DataLoader, TensorDataset
from utils_ner import TokenClassificationTask
logger = logging.getLogger(__name__)
class _lowerCamelCase ( a_ ):
_lowerCamelCase :Union[str, Any] = "token-classification"
def __init__( self : Dict , UpperCamelCase : Any ) -> Optional[int]:
"""simple docstring"""
if type(UpperCamelCase ) == dict:
lowerCAmelCase__ : Optional[int] = Namespace(**UpperCamelCase )
lowerCAmelCase__ : Tuple = import_module("""tasks""" )
try:
lowerCAmelCase__ : Union[str, Any] = getattr(UpperCamelCase , hparams.task_type )
lowerCAmelCase__ : TokenClassificationTask = token_classification_task_clazz()
except AttributeError:
raise ValueError(
f"""Task {hparams.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. """
f"""Available tasks classes are: {TokenClassificationTask.__subclasses__()}""" )
lowerCAmelCase__ : Optional[Any] = self.token_classification_task.get_labels(hparams.labels )
lowerCAmelCase__ : Dict = CrossEntropyLoss().ignore_index
super().__init__(UpperCamelCase , len(self.labels ) , self.mode )
def _lowerCAmelCase ( self : int , **UpperCamelCase : List[Any] ) -> str:
"""simple docstring"""
return self.model(**UpperCamelCase )
def _lowerCAmelCase ( self : List[str] , UpperCamelCase : Union[str, Any] , UpperCamelCase : Any ) -> Dict:
"""simple docstring"""
lowerCAmelCase__ : Tuple = {"""input_ids""": batch[0], """attention_mask""": batch[1], """labels""": batch[3]}
if self.config.model_type != "distilbert":
lowerCAmelCase__ : List[str] = (
batch[2] if self.config.model_type in ["""bert""", """xlnet"""] else None
) # XLM and RoBERTa don"t use token_type_ids
lowerCAmelCase__ : Tuple = self(**UpperCamelCase )
lowerCAmelCase__ : List[Any] = outputs[0]
# tensorboard_logs = {"loss": loss, "rate": self.lr_scheduler.get_last_lr()[-1]}
return {"loss": loss}
def _lowerCAmelCase ( self : Any ) -> str:
"""simple docstring"""
lowerCAmelCase__ : Optional[int] = self.hparams
for mode in ["train", "dev", "test"]:
lowerCAmelCase__ : Union[str, Any] = self._feature_file(UpperCamelCase )
if os.path.exists(UpperCamelCase ) and not args.overwrite_cache:
logger.info("""Loading features from cached file %s""" , UpperCamelCase )
lowerCAmelCase__ : Tuple = torch.load(UpperCamelCase )
else:
logger.info("""Creating features from dataset file at %s""" , args.data_dir )
lowerCAmelCase__ : Union[str, Any] = self.token_classification_task.read_examples_from_file(args.data_dir , UpperCamelCase )
lowerCAmelCase__ : Tuple = self.token_classification_task.convert_examples_to_features(
UpperCamelCase , self.labels , args.max_seq_length , self.tokenizer , cls_token_at_end=bool(self.config.model_type in ["""xlnet"""] ) , cls_token=self.tokenizer.cls_token , cls_token_segment_id=2 if self.config.model_type in ["""xlnet"""] else 0 , sep_token=self.tokenizer.sep_token , sep_token_extra=UpperCamelCase , pad_on_left=bool(self.config.model_type in ["""xlnet"""] ) , pad_token=self.tokenizer.pad_token_id , pad_token_segment_id=self.tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
logger.info("""Saving features into cached file %s""" , UpperCamelCase )
torch.save(UpperCamelCase , UpperCamelCase )
def _lowerCAmelCase ( self : Union[str, Any] , UpperCamelCase : int , UpperCamelCase : int , UpperCamelCase : bool = False ) -> DataLoader:
"""simple docstring"""
lowerCAmelCase__ : int = self._feature_file(UpperCamelCase )
logger.info("""Loading features from cached file %s""" , UpperCamelCase )
lowerCAmelCase__ : int = torch.load(UpperCamelCase )
lowerCAmelCase__ : str = torch.tensor([f.input_ids for f in features] , dtype=torch.long )
lowerCAmelCase__ : Any = torch.tensor([f.attention_mask for f in features] , dtype=torch.long )
if features[0].token_type_ids is not None:
lowerCAmelCase__ : Optional[int] = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long )
else:
lowerCAmelCase__ : Union[str, Any] = torch.tensor([0 for f in features] , dtype=torch.long )
# HACK(we will not use this anymore soon)
lowerCAmelCase__ : Union[str, Any] = torch.tensor([f.label_ids for f in features] , dtype=torch.long )
return DataLoader(
TensorDataset(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) , batch_size=UpperCamelCase )
    def validation_step(self, batch, batch_nb):
        """Compute validation loss and predictions for one batch."""
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
        if self.config.model_type != "distilbert":
            inputs["token_type_ids"] = (
                batch[2] if self.config.model_type in ["bert", "xlnet"] else None
            )  # XLM and RoBERTa don't use token_type_ids
        outputs = self(**inputs)
        tmp_eval_loss, logits = outputs[:2]
        preds = logits.detach().cpu().numpy()
        out_label_ids = inputs["labels"].detach().cpu().numpy()
        return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
    def _eval_end(self, outputs):
        """Aggregate per-batch outputs into evaluation metrics."""
        val_loss_mean = torch.stack([x["val_loss"] for x in outputs]).mean()
        preds = np.concatenate([x["pred"] for x in outputs], axis=0)
        preds = np.argmax(preds, axis=2)
        out_label_ids = np.concatenate([x["target"] for x in outputs], axis=0)
        label_map = dict(enumerate(self.labels))
        out_label_list = [[] for _ in range(out_label_ids.shape[0])]
        preds_list = [[] for _ in range(out_label_ids.shape[0])]
        for i in range(out_label_ids.shape[0]):
            for j in range(out_label_ids.shape[1]):
                if out_label_ids[i, j] != self.pad_token_label_id:
                    out_label_list[i].append(label_map[out_label_ids[i][j]])
                    preds_list[i].append(label_map[preds[i][j]])
        results = {
            "val_loss": val_loss_mean,
            "accuracy_score": accuracy_score(out_label_list, preds_list),
            "precision": precision_score(out_label_list, preds_list),
            "recall": recall_score(out_label_list, preds_list),
            "f1": f1_score(out_label_list, preds_list),
        }

        ret = dict(results.items())
        ret["log"] = results
        return ret, preds_list, out_label_list
    def validation_epoch_end(self, outputs):
        """Aggregate validation outputs when the epoch ends."""
        # when stable
        ret, preds, targets = self._eval_end(outputs)
        logs = ret["log"]
        return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
    def test_epoch_end(self, outputs):
        """Aggregate test outputs when the epoch ends."""
        # updating to test_epoch_end instead of deprecated test_end
        ret, predictions, targets = self._eval_end(outputs)

        # Converting to the dict required by pl
        # https://github.com/PyTorchLightning/pytorch-lightning/blob/master/\
        # pytorch_lightning/trainer/logging.py#L139
        logs = ret["log"]
        # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
        return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
@staticmethod
    def add_model_specific_args(parser, root_dir):
        """Add NER specific options on top of the generic model arguments."""
        BaseTransformer.add_model_specific_args(parser, root_dir)
        parser.add_argument(
            "--task_type", default="NER", type=str, help="Task type to fine tune in training (e.g. NER, POS, etc)"
        )
        parser.add_argument(
            "--max_seq_length",
            default=128,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--labels",
            default="",
            type=str,
            help="Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.",
        )
        parser.add_argument(
            "--gpus",
            default=0,
            type=int,
            help="The number of GPUs allocated for this, it is by default 0 meaning none",
        )
        parser.add_argument(
            "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
        )
        return parser
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    add_generic_args(parser, os.getcwd())
    parser = NERTransformer.add_model_specific_args(parser, os.getcwd())
    args = parser.parse_args()
    model = NERTransformer(args)
    trainer = generic_train(model, args)

    if args.do_predict:
        # See https://github.com/huggingface/transformers/issues/3159
        # pl use this default format to create a checkpoint:
        # https://github.com/PyTorchLightning/pytorch-lightning/blob/master\
        # /pytorch_lightning/callbacks/model_checkpoint.py#L322
        checkpoints = sorted(glob.glob(os.path.join(args.output_dir, "checkpoint-epoch=*.ckpt"), recursive=True))
        model = model.load_from_checkpoint(checkpoints[-1])
        trainer.test(model)
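# Illustrative launch sketch for the script above (paths, model name, and flag values
# below are assumptions for demonstration, not part of the script itself):
#   python run_ner.py --data_dir ./conll2003 --model_name_or_path bert-base-cased \
#       --output_dir ./ner-out --do_train --do_predict --max_seq_length 128 --gpus 1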
| 212 | 1 |
from __future__ import annotations
def decrypt_caesar_with_chi_squared(
    ciphertext: str,
    cipher_alphabet: list[str] | None = None,
    frequencies_dict: dict[str, float] | None = None,
    case_sensitive: bool = False,
) -> tuple[int, float, str]:
    """Try every Caesar shift and score each candidate plaintext with the
    chi-squared statistic against English letter frequencies."""
    alphabet_letters = cipher_alphabet or [chr(i) for i in range(97, 123)]
# If the argument is None or the user provided an empty dictionary
if not frequencies_dict:
# Frequencies of letters in the english language (how much they show up)
        frequencies = {
'a': 0.0_84_97,
'b': 0.0_14_92,
'c': 0.0_22_02,
'd': 0.0_42_53,
'e': 0.1_11_62,
'f': 0.0_22_28,
'g': 0.0_20_15,
'h': 0.0_60_94,
'i': 0.0_75_46,
'j': 0.0_01_53,
'k': 0.0_12_92,
'l': 0.0_40_25,
'm': 0.0_24_06,
'n': 0.0_67_49,
'o': 0.0_75_07,
'p': 0.0_19_29,
'q': 0.0_00_95,
'r': 0.0_75_87,
's': 0.0_63_27,
't': 0.0_93_56,
'u': 0.0_27_58,
'v': 0.0_09_78,
'w': 0.0_25_60,
'x': 0.0_01_50,
'y': 0.0_19_94,
'z': 0.0_00_77,
}
    else:
        # Custom frequencies dictionary
        frequencies = frequencies_dict

    if not case_sensitive:
        ciphertext = ciphertext.lower()

    # Chi squared statistic values
    chi_squared_statistic_values: dict[int, tuple[float, str]] = {}

    # cycle through all of the shifts
    for shift in range(len(alphabet_letters)):
        decrypted_with_shift = ""
# decrypt the message with the shift
for letter in ciphertext:
try:
# Try to index the letter in the alphabet
                new_key = (alphabet_letters.index(letter.lower()) - shift) % len(
                    alphabet_letters
                )
decrypted_with_shift += (
alphabet_letters[new_key].upper()
if case_sensitive and letter.isupper()
else alphabet_letters[new_key]
)
except ValueError:
# Append the character if it isn't in the alphabet
decrypted_with_shift += letter
        chi_squared_statistic = 0.0
# Loop through each letter in the decoded message with the shift
for letter in decrypted_with_shift:
if case_sensitive:
                letter = letter.lower()
if letter in frequencies:
# Get the amount of times the letter occurs in the message
                    occurrences = decrypted_with_shift.lower().count(letter)

                    # Get the expected amount of times the letter should appear based
                    # on letter frequencies
                    expected = frequencies[letter] * occurrences

                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected
# Add the margin of error to the total chi squared statistic
chi_squared_statistic += chi_letter_value
else:
if letter.lower() in frequencies:
                    # Get the amount of times the letter occurs in the message
                    occurrences = decrypted_with_shift.count(letter)

                    # Get the expected amount of times the letter should appear based
                    # on letter frequencies
                    expected = frequencies[letter.lower()] * occurrences

                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected
# Add the margin of error to the total chi squared statistic
chi_squared_statistic += chi_letter_value
# Add the data to the chi_squared_statistic_values dictionary
        chi_squared_statistic_values[shift] = (
            chi_squared_statistic,
            decrypted_with_shift,
        )
# Get the most likely cipher by finding the cipher with the smallest chi squared
# statistic
    def chi_squared_statistic_values_sorting_key(key: int) -> tuple[float, str]:
        return chi_squared_statistic_values[key]

    most_likely_cipher: int = min(
        chi_squared_statistic_values,
        key=chi_squared_statistic_values_sorting_key,
    )
# Get all the data from the most likely cipher (key, decoded message)
    (
        most_likely_cipher_chi_squared_value,
        decoded_most_likely_cipher,
    ) = chi_squared_statistic_values[most_likely_cipher]
# Return the data on the most likely shift
return (
most_likely_cipher,
most_likely_cipher_chi_squared_value,
decoded_most_likely_cipher,
)
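# Illustrative usage sketch (the ciphertext below is an assumption chosen for
# demonstration; the exact chi-squared value depends on the frequency table):
#   shift, chi_squared_value, decoded = decrypt_caesar_with_chi_squared("fgkc kj qemm")
#   print(f"most likely shift: {shift}, decoded text: {decoded!r}")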
| 140 |
import warnings
from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum
from ...processing_utils import ProcessorMixin
if is_torch_available():
import torch
class DecodeType(ExplicitEnum):
    CHARACTER = "char"
    BPE = "bpe"
    WORDPIECE = "wp"


SUPPORTED_ANNOTATION_FORMATS = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)


class MgpstrProcessor(ProcessorMixin):
    """Wraps an image processor and a character tokenizer into a single MGP-STR processor."""

    attributes = ["image_processor", "char_tokenizer"]

    image_processor_class = "ViTImageProcessor"
    char_tokenizer_class = "MgpstrTokenizer"
    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        self.char_tokenizer = tokenizer
        self.bpe_tokenizer = AutoTokenizer.from_pretrained("gpt2")
        self.wp_tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")

        super().__init__(image_processor, tokenizer)
    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        """Forward `images` to the image processor and `text` to the char tokenizer."""
        if images is None and text is None:
            raise ValueError("You need to specify either an `images` or `text` input to process.")

        if images is not None:
            inputs = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        if text is not None:
            encodings = self.char_tokenizer(text, return_tensors=return_tensors, **kwargs)

        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs
    def batch_decode(self, sequences):
        """Convert a tuple of (char, bpe, wp) logits into decoded strings, keeping
        the head with the highest confidence for each sample."""
        char_preds, bpe_preds, wp_preds = sequences
        batch_size = char_preds.size(0)

        char_strs, char_scores = self._decode_helper(char_preds, "char")
        bpe_strs, bpe_scores = self._decode_helper(bpe_preds, "bpe")
        wp_strs, wp_scores = self._decode_helper(wp_preds, "wp")

        final_strs = []
        final_scores = []
        for i in range(batch_size):
            scores = [char_scores[i], bpe_scores[i], wp_scores[i]]
            strs = [char_strs[i], bpe_strs[i], wp_strs[i]]
            max_score_index = scores.index(max(scores))
            final_strs.append(strs[max_score_index])
            final_scores.append(scores[max_score_index])

        out = {}
        out["generated_text"] = final_strs
        out["scores"] = final_scores
        out["char_preds"] = char_strs
        out["bpe_preds"] = bpe_strs
        out["wp_preds"] = wp_strs
        return out
    def _decode_helper(self, pred_logits, format):
        """Greedy-decode one head's logits and compute per-sample confidence scores."""
        if format == DecodeType.CHARACTER:
            decoder = self.char_decode
            eos_token = 1  # end-of-sequence id for the char tokenizer
            eos_str = "[s]"
        elif format == DecodeType.BPE:
            decoder = self.bpe_decode
            eos_token = 2  # end-of-sequence id for the bpe tokenizer
            eos_str = "#"
        elif format == DecodeType.WORDPIECE:
            decoder = self.wp_decode
            eos_token = 102  # end-of-sequence id for the wordpiece tokenizer
            eos_str = "[SEP]"
        else:
            raise ValueError(f"Format {format} is not supported.")

        dec_strs, conf_scores = [], []
        batch_size = pred_logits.size(0)
        batch_max_length = pred_logits.size(1)
        _, preds_index = pred_logits.topk(1, dim=-1, largest=True, sorted=True)
        preds_index = preds_index.view(-1, batch_max_length)[:, 1:]
        preds_str = decoder(preds_index)
        preds_max_prob, _ = torch.nn.functional.softmax(pred_logits, dim=2).max(dim=2)
        preds_max_prob = preds_max_prob[:, 1:]

        for index in range(batch_size):
            pred_eos = preds_str[index].find(eos_str)
            pred = preds_str[index][:pred_eos]
            pred_index = preds_index[index].cpu().tolist()
            pred_eos_index = pred_index.index(eos_token) if eos_token in pred_index else -1
            pred_max_prob = preds_max_prob[index][: pred_eos_index + 1]
            confidence_score = pred_max_prob.cumprod(dim=0)[-1] if pred_max_prob.nelement() != 0 else 0.0
            dec_strs.append(pred)
            conf_scores.append(confidence_score)

        return dec_strs, conf_scores
    def char_decode(self, sequences):
        """Decode char-token ids to strings, stripping padding spaces."""
        decode_strs = [seq.replace(" ", "") for seq in self.char_tokenizer.batch_decode(sequences)]
        return decode_strs

    def bpe_decode(self, sequences):
        """Decode BPE-token ids to strings."""
        return self.bpe_tokenizer.batch_decode(sequences)

    def wp_decode(self, sequences):
        """Decode wordpiece-token ids to strings, stripping padding spaces."""
        decode_strs = [seq.replace(" ", "") for seq in self.wp_tokenizer.batch_decode(sequences)]
        return decode_strs
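# Illustrative usage sketch (the checkpoint id and the surrounding variable names are
# assumptions for demonstration):
# processor = MgpstrProcessor.from_pretrained("alibaba-damo/mgp-str-base")
# pixel_values = processor(images=image, return_tensors="pt").pixel_values
# outputs = model(pixel_values)  # an MGP-STR scene-text-recognition model
# text = processor.batch_decode(outputs.logits)["generated_text"]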
| 140 | 1 |
import inspect
import os
import torch
from transformers import AutoModel
from transformers.testing_utils import mockenv_context
from transformers.trainer_utils import set_seed
import accelerate
from accelerate.accelerator import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils.testing import (
AccelerateTestCase,
TempDirTestCase,
execute_subprocess_async,
require_cuda,
require_fsdp,
require_multi_gpu,
slow,
)
from accelerate.utils.constants import (
FSDP_AUTO_WRAP_POLICY,
FSDP_BACKWARD_PREFETCH,
FSDP_SHARDING_STRATEGY,
FSDP_STATE_DICT_TYPE,
)
from accelerate.utils.dataclasses import FullyShardedDataParallelPlugin
from accelerate.utils.other import patch_environment
set_seed(42)
BERT_BASE_CASED = "bert-base-cased"
FP16 = "fp16"
BF16 = "bf16"
dtypes = [FP16, BF16]
@require_fsdp
@require_cuda
class FSDPPluginIntegration(AccelerateTestCase):
    def setUp(self):
        super().setUp()

        self.dist_env = dict(
            ACCELERATE_USE_FSDP="true",
            MASTER_ADDR="localhost",
            MASTER_PORT="10999",
            RANK="0",
            LOCAL_RANK="0",
            WORLD_SIZE="1",
        )
    def test_sharding_strategy(self):
        from torch.distributed.fsdp.fully_sharded_data_parallel import ShardingStrategy

        for i, strategy in enumerate(FSDP_SHARDING_STRATEGY):
            env = self.dist_env.copy()
            env["FSDP_SHARDING_STRATEGY"] = f"{i + 1}"
            env["FSDP_SHARDING_STRATEGY_NAME"] = strategy
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                self.assertEqual(fsdp_plugin.sharding_strategy, ShardingStrategy(i + 1))
    def test_backward_prefetch(self):
        from torch.distributed.fsdp.fully_sharded_data_parallel import BackwardPrefetch

        for i, prefetch_policy in enumerate(FSDP_BACKWARD_PREFETCH):
            env = self.dist_env.copy()
            env["FSDP_BACKWARD_PREFETCH"] = prefetch_policy
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                if prefetch_policy == "NO_PREFETCH":
                    self.assertIsNone(fsdp_plugin.backward_prefetch)
                else:
                    self.assertEqual(fsdp_plugin.backward_prefetch, BackwardPrefetch(i + 1))
    def test_state_dict_type(self):
        from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType

        for i, state_dict_type in enumerate(FSDP_STATE_DICT_TYPE):
            env = self.dist_env.copy()
            env["FSDP_STATE_DICT_TYPE"] = state_dict_type
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                self.assertEqual(fsdp_plugin.state_dict_type, StateDictType(i + 1))
                if state_dict_type == "FULL_STATE_DICT":
                    self.assertTrue(fsdp_plugin.state_dict_config.offload_to_cpu)
                    self.assertTrue(fsdp_plugin.state_dict_config.rank0_only)
    def test_auto_wrap_policy(self):
        model = AutoModel.from_pretrained(BERT_BASE_CASED)
        for policy in FSDP_AUTO_WRAP_POLICY:
            env = self.dist_env.copy()
            env["FSDP_AUTO_WRAP_POLICY"] = policy
            if policy == "TRANSFORMER_BASED_WRAP":
                env["FSDP_TRANSFORMER_CLS_TO_WRAP"] = "BertLayer"
            elif policy == "SIZE_BASED_WRAP":
                env["FSDP_MIN_NUM_PARAMS"] = "2000"
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                fsdp_plugin.set_auto_wrap_policy(model)
                if policy == "NO_WRAP":
                    self.assertIsNone(fsdp_plugin.auto_wrap_policy)
                else:
                    self.assertIsNotNone(fsdp_plugin.auto_wrap_policy)

        env = self.dist_env.copy()
        env["FSDP_AUTO_WRAP_POLICY"] = "TRANSFORMER_BASED_WRAP"
        env["FSDP_TRANSFORMER_CLS_TO_WRAP"] = "T5Layer"
        with mockenv_context(**env):
            fsdp_plugin = FullyShardedDataParallelPlugin()
            with self.assertRaises(Exception) as cm:
                fsdp_plugin.set_auto_wrap_policy(model)
            self.assertTrue("Could not find the transformer layer class to wrap in the model." in str(cm.exception))

        env = self.dist_env.copy()
        env["FSDP_AUTO_WRAP_POLICY"] = "SIZE_BASED_WRAP"
        env["FSDP_MIN_NUM_PARAMS"] = "0"
        with mockenv_context(**env):
            fsdp_plugin = FullyShardedDataParallelPlugin()
            fsdp_plugin.set_auto_wrap_policy(model)
            self.assertIsNone(fsdp_plugin.auto_wrap_policy)
    def test_mixed_precision(self):
        from torch.distributed.fsdp.fully_sharded_data_parallel import MixedPrecision
        from torch.distributed.fsdp.sharded_grad_scaler import ShardedGradScaler

        for mp_dtype in dtypes:
            env = self.dist_env.copy()
            env["ACCELERATE_MIXED_PRECISION"] = mp_dtype
            with mockenv_context(**env):
                accelerator = Accelerator()
                if mp_dtype == "fp16":
                    dtype = torch.float16
                elif mp_dtype == "bf16":
                    dtype = torch.bfloat16
                mp_policy = MixedPrecision(param_dtype=dtype, reduce_dtype=dtype, buffer_dtype=dtype)
                self.assertEqual(accelerator.state.fsdp_plugin.mixed_precision_policy, mp_policy)
                if mp_dtype == FP16:
                    self.assertTrue(isinstance(accelerator.scaler, ShardedGradScaler))
                elif mp_dtype == BF16:
                    self.assertIsNone(accelerator.scaler)
                AcceleratorState._reset_state(True)
    def test_cpu_offload(self):
        from torch.distributed.fsdp.fully_sharded_data_parallel import CPUOffload

        for flag in [True, False]:
            env = self.dist_env.copy()
            env["FSDP_OFFLOAD_PARAMS"] = str(flag).lower()
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                self.assertEqual(fsdp_plugin.cpu_offload, CPUOffload(offload_params=flag))
@require_fsdp
@require_multi_gpu
@slow
class FSDPIntegrationTest(TempDirTestCase):
    def setUp(self):
        super().setUp()
        self.performance_lower_bound = 0.82
        self.performance_configs = [
            "fsdp_shard_grad_op_transformer_based_wrap",
            "fsdp_full_shard_transformer_based_wrap",
        ]
        self.peak_memory_usage_upper_bound = {
            "multi_gpu_fp16": 3200,
            "fsdp_shard_grad_op_transformer_based_wrap_fp16": 2000,
            "fsdp_full_shard_transformer_based_wrap_fp16": 1900,
            # Disabling below test as it overwhelms the RAM memory usage
            # on CI self-hosted runner leading to tests getting killed.
            # "fsdp_full_shard_cpu_offload_transformer_based_wrap_fp32": 1500,  # fp16 was leading to indefinite hang
        }
        self.n_train = 160
        self.n_val = 160

        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_scripts_folder = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "external_deps"])
    def test_performance(self):
        self.test_file_path = os.path.join(self.test_scripts_folder, "test_performance.py")
        cmd = ["accelerate", "launch", "--num_processes=2", "--num_machines=1", "--machine_rank=0", "--use_fsdp"]
        for config in self.performance_configs:
            cmd_config = cmd.copy()
            for i, strategy in enumerate(FSDP_SHARDING_STRATEGY):
                if strategy.lower() in config:
                    cmd_config.append(f"--fsdp_sharding_strategy={i+1}")
                    break

            if "fp32" in config:
                cmd_config.append("--mixed_precision=no")
            else:
                cmd_config.append("--mixed_precision=fp16")

            if "cpu_offload" in config:
                cmd_config.append("--fsdp_offload_params=True")

            for policy in FSDP_AUTO_WRAP_POLICY:
                if policy.lower() in config:
                    cmd_config.append(f"--fsdp_auto_wrap_policy={policy}")
                    break

            if policy == "TRANSFORMER_BASED_WRAP":
                cmd_config.append("--fsdp_transformer_layer_cls_to_wrap=BertLayer")
            elif policy == "SIZE_BASED_WRAP":
                cmd_config.append("--fsdp_min_num_params=2000")

            cmd_config.extend(
                [
                    self.test_file_path,
                    f"--output_dir={self.tmpdir}",
                    f"--performance_lower_bound={self.performance_lower_bound}",
                ]
            )
            with patch_environment(omp_num_threads=1):
                execute_subprocess_async(cmd_config, env=os.environ.copy())
    def test_checkpointing(self):
        self.test_file_path = os.path.join(self.test_scripts_folder, "test_checkpointing.py")
        cmd = [
            "accelerate",
            "launch",
            "--num_processes=2",
            "--num_machines=1",
            "--machine_rank=0",
            "--use_fsdp",
            "--mixed_precision=fp16",
            "--fsdp_transformer_layer_cls_to_wrap=BertLayer",
        ]

        for i, strategy in enumerate(FSDP_SHARDING_STRATEGY):
            cmd_config = cmd.copy()
            cmd_config.append(f"--fsdp_sharding_strategy={i+1}")
            if strategy != "FULL_SHARD":
                continue
            state_dict_config_index = len(cmd_config)
            for state_dict_type in FSDP_STATE_DICT_TYPE:
                cmd_config = cmd_config[:state_dict_config_index]
                cmd_config.append(f"--fsdp_state_dict_type={state_dict_type}")
                cmd_config.extend(
                    [
                        self.test_file_path,
                        f"--output_dir={self.tmpdir}",
                        "--partial_train_epoch=1",
                    ]
                )
                with patch_environment(omp_num_threads=1):
                    execute_subprocess_async(cmd_config, env=os.environ.copy())

                cmd_config = cmd_config[:-1]
                resume_from_checkpoint = os.path.join(self.tmpdir, "epoch_0")
                cmd_config.extend(
                    [
                        f"--resume_from_checkpoint={resume_from_checkpoint}",
                    ]
                )
                with patch_environment(omp_num_threads=1):
                    execute_subprocess_async(cmd_config, env=os.environ.copy())
    def test_peak_memory_usage(self):
        self.test_file_path = os.path.join(self.test_scripts_folder, "test_peak_memory_usage.py")
        cmd = [
            "accelerate",
            "launch",
            "--num_processes=2",
            "--num_machines=1",
            "--machine_rank=0",
        ]
        for spec, peak_mem_upper_bound in self.peak_memory_usage_upper_bound.items():
            cmd_config = cmd.copy()
            if "fp16" in spec:
                cmd_config.extend(["--mixed_precision=fp16"])
            else:
                cmd_config.extend(["--mixed_precision=no"])

            if "multi_gpu" in spec:
                continue
            else:
                cmd_config.extend(["--use_fsdp"])
                for i, strategy in enumerate(FSDP_SHARDING_STRATEGY):
                    if strategy.lower() in spec:
                        cmd_config.append(f"--fsdp_sharding_strategy={i+1}")
                        break

                if "cpu_offload" in spec:
                    cmd_config.append("--fsdp_offload_params=True")

                for policy in FSDP_AUTO_WRAP_POLICY:
                    if policy.lower() in spec:
                        cmd_config.append(f"--fsdp_auto_wrap_policy={policy}")
                        break

                if policy == "TRANSFORMER_BASED_WRAP":
                    cmd_config.append("--fsdp_transformer_layer_cls_to_wrap=BertLayer")
                elif policy == "SIZE_BASED_WRAP":
                    cmd_config.append("--fsdp_min_num_params=2000")

            cmd_config.extend(
                [
                    self.test_file_path,
                    f"--output_dir={self.tmpdir}",
                    f"--peak_memory_upper_bound={peak_mem_upper_bound}",
                    f"--n_train={self.n_train}",
                    f"--n_val={self.n_val}",
                ]
            )
            with patch_environment(omp_num_threads=1):
                execute_subprocess_async(cmd_config, env=os.environ.copy())
| 357 |
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets,
# U and V such that every edge (u, v) either connects a vertex from U to V or a vertex
# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V,
# or u belongs to V and v to U. We can also say that there is no edge that connects
# vertices of same set.
def check_bipartite_dfs(graph):
    """Color the graph with two colors via DFS and verify that no edge joins
    two vertices of the same color."""
    visited = [False] * len(graph)
    color = [-1] * len(graph)

    def dfs(v, c):
        visited[v] = True
        color[v] = c
        for u in graph[v]:
            if not visited[u]:
                dfs(u, 1 - c)

    for i in range(len(graph)):
        if not visited[i]:
            dfs(i, 0)

    for i in range(len(graph)):
        for j in graph[i]:
            if color[i] == color[j]:
                return False

    return True
# Adjacency list of graph
graph = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))
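# Illustrative extra check (this adjacency list is an assumption for demonstration):
# a triangle contains an odd cycle, so the function should report False.
# triangle = {0: [1, 2], 1: [0, 2], 2: [0, 1]}
# print(check_bipartite_dfs(triangle))  # False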
| 59 | 0 |
"""simple docstring"""
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def tokenize(example):
    """Tokenize one example and record its characters-per-token ratio."""
    output = {}
    output["input_ids"] = tokenizer(example["content"], truncation=False)["input_ids"]
    output["ratio_char_token"] = len(example["content"]) / len(output["input_ids"])
    return output
parser = HfArgumentParser(PretokenizationArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)

t_start = time.time()
ds = load_dataset(args.dataset_name, split="train")
print(f"Dataset loaded in {time.time()-t_start:.2f}s")

t_start = time.time()
ds = ds.map(
tokenize,
num_proc=args.num_workers,
remove_columns=[
'''repo_name''',
'''path''',
'''copies''',
'''size''',
'''content''',
'''license''',
'''hash''',
'''line_mean''',
'''line_max''',
'''alpha_frac''',
'''autogenerated''',
],
)
print(F"Dataset tokenized in {time.time()-t_start:.2f}s")
t_start = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(F"Data pushed to the hub in {time.time()-t_start:.2f}s")
| 54 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import BlenderbotConfig, BlenderbotTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import TFAutoModelForSeq2SeqLM, TFBlenderbotForConditionalGeneration, TFBlenderbotModel
@require_tf
class TFBlenderbotModelTester:
    config_cls = BlenderbotConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFBlenderbotModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_no_past_slice, output_from_past_slice, rtol=1e-3)
def prepare_blenderbot_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class TFBlenderbotModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFBlenderbotForConditionalGeneration, TFBlenderbotModel) if is_tf_available() else ()
    all_generative_model_classes = (TFBlenderbotForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFBlenderbotForConditionalGeneration,
            "feature-extraction": TFBlenderbotModel,
            "summarization": TFBlenderbotForConditionalGeneration,
            "text2text-generation": TFBlenderbotForConditionalGeneration,
            "translation": TFBlenderbotForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFBlenderbotModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlenderbotConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_tokenizers
@require_tf
class TFBlenderbot400MIntegrationTests(unittest.TestCase):
    src_text = ["My friends are cool but they eat too many carbs."]
    model_name = "facebook/blenderbot-400M-distill"

    @cached_property
    def tokenizer(self):
        return BlenderbotTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model

    @slow
    def test_generation_from_long_input(self):
        model_inputs = self.tokenizer(self.src_text, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids,
        )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)[0]
        assert (
            generated_words
            == " That's unfortunate. Are they trying to lose weight or are they just trying to be healthier?"
        )
| 125 | 0 |
'''simple docstring'''
import re
from typing import Callable, List, Optional, Union
import tensorflow as tf
try:
from tensorflow.keras.optimizers.legacy import Adam
except ImportError:
from tensorflow.keras.optimizers import Adam
class WarmUp(tf.keras.optimizers.schedules.LearningRateSchedule):
    """Linear warmup schedule that hands off to `decay_schedule_fn` afterwards."""

    def __init__(
        self,
        initial_learning_rate: float,
        decay_schedule_fn: Callable,
        warmup_steps: int,
        power: float = 1.0,
        name: str = None,
    ):
        super().__init__()
        self.initial_learning_rate = initial_learning_rate
        self.warmup_steps = warmup_steps
        self.power = power
        self.decay_schedule_fn = decay_schedule_fn
        self.name = name

    def __call__(self, step):
        with tf.name_scope(self.name or "WarmUp") as name:
            # Implements polynomial warmup. i.e., if global_step < warmup_steps, the
            # learning rate will be `global_step/num_warmup_steps * init_lr`.
            global_step_float = tf.cast(step, tf.float32)
            warmup_steps_float = tf.cast(self.warmup_steps, tf.float32)
            warmup_percent_done = global_step_float / warmup_steps_float
            warmup_learning_rate = self.initial_learning_rate * tf.math.pow(warmup_percent_done, self.power)
            return tf.cond(
                global_step_float < warmup_steps_float,
                lambda: warmup_learning_rate,
                lambda: self.decay_schedule_fn(step - self.warmup_steps),
                name=name,
            )
    def get_config(self):
        return {
            "initial_learning_rate": self.initial_learning_rate,
            "decay_schedule_fn": self.decay_schedule_fn,
            "warmup_steps": self.warmup_steps,
            "power": self.power,
            "name": self.name,
        }
def create_optimizer(
    init_lr,
    num_train_steps,
    num_warmup_steps,
    min_lr_ratio=0.0,
    adam_beta1=0.9,
    adam_beta2=0.999,
    adam_epsilon=1e-8,
    adam_clipnorm=None,
    adam_global_clipnorm=None,
    weight_decay_rate=0.0,
    power=1.0,
    include_in_weight_decay=None,
):
    lr_schedule = tf.keras.optimizers.schedules.PolynomialDecay(
        initial_learning_rate=init_lr,
        decay_steps=num_train_steps - num_warmup_steps,
        end_learning_rate=init_lr * min_lr_ratio,
        power=power,
    )
    if num_warmup_steps:
        lr_schedule = WarmUp(
            initial_learning_rate=init_lr,
            decay_schedule_fn=lr_schedule,
            warmup_steps=num_warmup_steps,
        )
    if weight_decay_rate > 0.0:
        optimizer = AdamWeightDecay(
            learning_rate=lr_schedule,
            weight_decay_rate=weight_decay_rate,
            beta_1=adam_beta1,
            beta_2=adam_beta2,
            epsilon=adam_epsilon,
            clipnorm=adam_clipnorm,
            global_clipnorm=adam_global_clipnorm,
            exclude_from_weight_decay=["LayerNorm", "layer_norm", "bias"],
            include_in_weight_decay=include_in_weight_decay,
        )
    else:
        optimizer = tf.keras.optimizers.Adam(
            learning_rate=lr_schedule,
            beta_1=adam_beta1,
            beta_2=adam_beta2,
            epsilon=adam_epsilon,
            clipnorm=adam_clipnorm,
            global_clipnorm=adam_global_clipnorm,
        )
    # We return the optimizer and the LR scheduler in order to better track the
    # evolution of the LR independently of the optimizer.
    return optimizer, lr_schedule
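# Illustrative usage sketch (the hyperparameter values below are assumptions):
# optimizer, lr_schedule = create_optimizer(
#     init_lr=5e-5, num_train_steps=10_000, num_warmup_steps=500, weight_decay_rate=0.01
# )
# model.compile(optimizer=optimizer, loss=loss_fn)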
class AdamWeightDecay(Adam):
    """Adam with decoupled weight decay that is not applied to excluded parameters."""

    def __init__(
        self,
        learning_rate: Union[float, tf.keras.optimizers.schedules.LearningRateSchedule] = 0.001,
        beta_1: float = 0.9,
        beta_2: float = 0.999,
        epsilon: float = 1e-7,
        amsgrad: bool = False,
        weight_decay_rate: float = 0.0,
        include_in_weight_decay: Optional[List[str]] = None,
        exclude_from_weight_decay: Optional[List[str]] = None,
        name: str = "AdamWeightDecay",
        **kwargs,
    ):
        super().__init__(learning_rate, beta_1, beta_2, epsilon, amsgrad, name, **kwargs)
        self.weight_decay_rate = weight_decay_rate
        self._include_in_weight_decay = include_in_weight_decay
        self._exclude_from_weight_decay = exclude_from_weight_decay
    @classmethod
    def from_config(cls, config):
        """Creates an optimizer from its config with WarmUp custom object."""
        custom_objects = {"WarmUp": WarmUp}
        return super(AdamWeightDecay, cls).from_config(config, custom_objects=custom_objects)
    def _prepare_local(self, var_device, var_dtype, apply_state):
        super(AdamWeightDecay, self)._prepare_local(var_device, var_dtype, apply_state)
        apply_state[(var_device, var_dtype)]["weight_decay_rate"] = tf.constant(
            self.weight_decay_rate, name="adam_weight_decay_rate"
        )

    def _decay_weights_op(self, var, learning_rate, apply_state):
        do_decay = self._do_use_weight_decay(var.name)
        if do_decay:
            return var.assign_sub(
                learning_rate * var * apply_state[(var.device, var.dtype.base_dtype)]["weight_decay_rate"],
                use_locking=self._use_locking,
            )
        return tf.no_op()
    def apply_gradients(self, grads_and_vars, name=None, **kwargs):
        grads, tvars = list(zip(*grads_and_vars))
        return super(AdamWeightDecay, self).apply_gradients(zip(grads, tvars), name=name, **kwargs)

    def _get_lr(self, var_device, var_dtype, apply_state):
        """Retrieves the learning rate with the given state."""
        if apply_state is None:
            return self._decayed_lr_t[var_dtype], {}

        apply_state = apply_state or {}
        coefficients = apply_state.get((var_device, var_dtype))
        if coefficients is None:
            coefficients = self._fallback_apply_state(var_device, var_dtype)
            apply_state[(var_device, var_dtype)] = coefficients

        return coefficients["lr_t"], {"apply_state": apply_state}
    def _resource_apply_dense(self, grad, var, apply_state=None):
        lr_t, kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state)
        decay = self._decay_weights_op(var, lr_t, apply_state)
        with tf.control_dependencies([decay]):
            return super(AdamWeightDecay, self)._resource_apply_dense(grad, var, **kwargs)

    def _resource_apply_sparse(self, grad, var, indices, apply_state=None):
        lr_t, kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state)
        decay = self._decay_weights_op(var, lr_t, apply_state)
        with tf.control_dependencies([decay]):
            return super(AdamWeightDecay, self)._resource_apply_sparse(grad, var, indices, **kwargs)
    def get_config(self):
        config = super().get_config()
        config.update({"weight_decay_rate": self.weight_decay_rate})
        return config

    def _do_use_weight_decay(self, param_name):
        """Whether to use L2 weight decay for `param_name`."""
        if self.weight_decay_rate == 0:
            return False

        if self._include_in_weight_decay:
            for r in self._include_in_weight_decay:
                if re.search(r, param_name) is not None:
                    return True

        if self._exclude_from_weight_decay:
            for r in self._exclude_from_weight_decay:
                if re.search(r, param_name) is not None:
                    return False
        return True
class GradientAccumulator:
    """Accumulates gradients over several steps before an optimizer update."""

    def __init__(self):
        """Initializes the accumulator."""
        self._gradients = []
        self._accum_steps = None

    @property
    def step(self):
        """Number of accumulated steps."""
        if self._accum_steps is None:
            self._accum_steps = tf.Variable(
                tf.constant(0, dtype=tf.int64),
                trainable=False,
                synchronization=tf.VariableSynchronization.ON_READ,
                aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA,
            )

        return self._accum_steps.value()

    @property
    def gradients(self):
        """The accumulated gradients on the current replica."""
        if not self._gradients:
            raise ValueError("The accumulator should be called first to initialize the gradients")
        return [gradient.value() if gradient is not None else gradient for gradient in self._gradients]
    def __call__(self, gradients):
        """Accumulates `gradients` on the current replica."""
        if not self._gradients:
            _ = self.step  # Create the step variable.
            self._gradients.extend(
                [
                    tf.Variable(
                        tf.zeros_like(gradient),
                        trainable=False,
                        synchronization=tf.VariableSynchronization.ON_READ,
                        aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA,
                    )
                    if gradient is not None
                    else gradient
                    for gradient in gradients
                ]
            )
        if len(gradients) != len(self._gradients):
            raise ValueError(f"Expected {len(self._gradients)} gradients, but got {len(gradients)}")

        for accum_gradient, gradient in zip(self._gradients, gradients):
            if accum_gradient is not None and gradient is not None:
                accum_gradient.assign_add(gradient)

        self._accum_steps.assign_add(1)
    def reset(self):
        """Resets the accumulated gradients on the current replica."""
        if not self._gradients:
            return
        self._accum_steps.assign(0)
        for gradient in self._gradients:
            if gradient is not None:
                gradient.assign(tf.zeros_like(gradient))
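# Illustrative usage sketch of the accumulator (the training-loop names below are
# assumptions, e.g. `compute_gradients` is a hypothetical helper):
# accumulator = GradientAccumulator()
# for step, batch in enumerate(dataset):
#     grads = compute_gradients(batch)
#     accumulator(grads)
#     if (step + 1) % accumulation_steps == 0:
#         optimizer.apply_gradients(zip(accumulator.gradients, model.trainable_variables))
#         accumulator.reset()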
| 352 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_nllb import NllbTokenizer
else:
    NllbTokenizer = None
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/nllb-200-distilled-600M": (
            "https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model"
        ),
    },
    "tokenizer_file": {
        "facebook/nllb-200-distilled-600M": (
            "https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/nllb-large-en-ro": 1024,
    "facebook/nllb-200-distilled-600M": 1024,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = ['ace_Arab', 'ace_Latn', 'acm_Arab', 'acq_Arab', 'aeb_Arab', 'afr_Latn', 'ajp_Arab', 'aka_Latn', 'amh_Ethi', 'apc_Arab', 'arb_Arab', 'ars_Arab', 'ary_Arab', 'arz_Arab', 'asm_Beng', 'ast_Latn', 'awa_Deva', 'ayr_Latn', 'azb_Arab', 'azj_Latn', 'bak_Cyrl', 'bam_Latn', 'ban_Latn', 'bel_Cyrl', 'bem_Latn', 'ben_Beng', 'bho_Deva', 'bjn_Arab', 'bjn_Latn', 'bod_Tibt', 'bos_Latn', 'bug_Latn', 'bul_Cyrl', 'cat_Latn', 'ceb_Latn', 'ces_Latn', 'cjk_Latn', 'ckb_Arab', 'crh_Latn', 'cym_Latn', 'dan_Latn', 'deu_Latn', 'dik_Latn', 'dyu_Latn', 'dzo_Tibt', 'ell_Grek', 'eng_Latn', 'epo_Latn', 'est_Latn', 'eus_Latn', 'ewe_Latn', 'fao_Latn', 'pes_Arab', 'fij_Latn', 'fin_Latn', 'fon_Latn', 'fra_Latn', 'fur_Latn', 'fuv_Latn', 'gla_Latn', 'gle_Latn', 'glg_Latn', 'grn_Latn', 'guj_Gujr', 'hat_Latn', 'hau_Latn', 'heb_Hebr', 'hin_Deva', 'hne_Deva', 'hrv_Latn', 'hun_Latn', 'hye_Armn', 'ibo_Latn', 'ilo_Latn', 'ind_Latn', 'isl_Latn', 'ita_Latn', 'jav_Latn', 'jpn_Jpan', 'kab_Latn', 'kac_Latn', 'kam_Latn', 'kan_Knda', 'kas_Arab', 'kas_Deva', 'kat_Geor', 'knc_Arab', 'knc_Latn', 'kaz_Cyrl', 'kbp_Latn', 'kea_Latn', 'khm_Khmr', 'kik_Latn', 'kin_Latn', 'kir_Cyrl', 'kmb_Latn', 'kon_Latn', 'kor_Hang', 'kmr_Latn', 'lao_Laoo', 'lvs_Latn', 'lij_Latn', 'lim_Latn', 'lin_Latn', 'lit_Latn', 'lmo_Latn', 'ltg_Latn', 'ltz_Latn', 'lua_Latn', 'lug_Latn', 'luo_Latn', 'lus_Latn', 'mag_Deva', 'mai_Deva', 'mal_Mlym', 'mar_Deva', 'min_Latn', 'mkd_Cyrl', 'plt_Latn', 'mlt_Latn', 'mni_Beng', 'khk_Cyrl', 'mos_Latn', 'mri_Latn', 'zsm_Latn', 'mya_Mymr', 'nld_Latn', 'nno_Latn', 'nob_Latn', 'npi_Deva', 'nso_Latn', 'nus_Latn', 'nya_Latn', 'oci_Latn', 'gaz_Latn', 'ory_Orya', 'pag_Latn', 'pan_Guru', 'pap_Latn', 'pol_Latn', 'por_Latn', 'prs_Arab', 'pbt_Arab', 'quy_Latn', 'ron_Latn', 'run_Latn', 'rus_Cyrl', 'sag_Latn', 'san_Deva', 'sat_Beng', 'scn_Latn', 'shn_Mymr', 'sin_Sinh', 'slk_Latn', 'slv_Latn', 'smo_Latn', 'sna_Latn', 'snd_Arab', 'som_Latn', 'sot_Latn', 'spa_Latn', 'als_Latn', 'srd_Latn', 'srp_Cyrl', 'ssw_Latn', 'sun_Latn', 'swe_Latn', 'swh_Latn', 'szl_Latn', 'tam_Taml', 'tat_Cyrl', 'tel_Telu', 'tgk_Cyrl', 'tgl_Latn', 'tha_Thai', 'tir_Ethi', 'taq_Latn', 'taq_Tfng', 'tpi_Latn', 'tsn_Latn', 'tso_Latn', 'tuk_Latn', 'tum_Latn', 'tur_Latn', 'twi_Latn', 'tzm_Tfng', 'uig_Arab', 'ukr_Cyrl', 'umb_Latn', 'urd_Arab', 'uzn_Latn', 'vec_Latn', 'vie_Latn', 'war_Latn', 'wol_Latn', 'xho_Latn', 'ydd_Hebr', 'yor_Latn', 'yue_Hant', 'zho_Hans', 'zho_Hant', 'zul_Latn']
class NllbTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = NllbTokenizer

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
def __init__( self : List[Any] ,lowerCamelCase__ : Optional[Any]=None ,lowerCamelCase__ : Dict=None ,lowerCamelCase__ : List[Any]="<s>" ,lowerCamelCase__ : Dict="</s>" ,lowerCamelCase__ : List[Any]="</s>" ,lowerCamelCase__ : Union[str, Any]="<s>" ,lowerCamelCase__ : List[Any]="<unk>" ,lowerCamelCase__ : Any="<pad>" ,lowerCamelCase__ : Optional[Any]="<mask>" ,lowerCamelCase__ : Optional[Any]=None ,lowerCamelCase__ : str=None ,lowerCamelCase__ : Tuple=None ,lowerCamelCase__ : Union[str, Any]=False ,**lowerCamelCase__ : Optional[Any] ,):
'''simple docstring'''
# Mask token behave like a normal word, i.e. include the space before it
_UpperCamelCase : Optional[int] = AddedToken(lowerCamelCase__ ,lstrip=lowerCamelCase__ ,rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ ,lowerCamelCase__ ) else mask_token
_UpperCamelCase : Union[str, Any] = legacy_behaviour
super().__init__(
vocab_file=lowerCamelCase__ ,tokenizer_file=lowerCamelCase__ ,bos_token=lowerCamelCase__ ,eos_token=lowerCamelCase__ ,sep_token=lowerCamelCase__ ,cls_token=lowerCamelCase__ ,unk_token=lowerCamelCase__ ,pad_token=lowerCamelCase__ ,mask_token=lowerCamelCase__ ,src_lang=lowerCamelCase__ ,tgt_lang=lowerCamelCase__ ,additional_special_tokens=lowerCamelCase__ ,legacy_behaviour=lowerCamelCase__ ,**lowerCamelCase__ ,)
_UpperCamelCase : int = vocab_file
_UpperCamelCase : int = False if not self.vocab_file else True
_UpperCamelCase : Dict = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({'additional_special_tokens': _additional_special_tokens} )
_UpperCamelCase : List[str] = {
lang_code: self.convert_tokens_to_ids(lowerCamelCase__ ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
_UpperCamelCase : List[str] = src_lang if src_lang is not None else 'eng_Latn'
_UpperCamelCase : int = self.convert_tokens_to_ids(self._src_lang )
_UpperCamelCase : Dict = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
return self._src_lang
@src_lang.setter
def UpperCamelCase_ ( self : Union[str, Any] ,lowerCamelCase__ : str ):
'''simple docstring'''
_UpperCamelCase : str = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def UpperCamelCase_ ( self : List[str] ,lowerCamelCase__ : List[int] ,lowerCamelCase__ : Optional[List[int]] = None ):
'''simple docstring'''
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def UpperCamelCase_ ( self : Dict ,lowerCamelCase__ : List[int] ,lowerCamelCase__ : Optional[List[int]] = None ):
'''simple docstring'''
_UpperCamelCase : Dict = [self.sep_token_id]
_UpperCamelCase : List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def UpperCamelCase_ ( self : Union[str, Any] ,lowerCamelCase__ : str ,lowerCamelCase__ : str ,lowerCamelCase__ : Optional[str] ,lowerCamelCase__ : Optional[str] ,**lowerCamelCase__ : Dict ):
'''simple docstring'''
if src_lang is None or tgt_lang is None:
raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' )
_UpperCamelCase : Tuple = src_lang
_UpperCamelCase : Optional[Any] = self(lowerCamelCase__ ,add_special_tokens=lowerCamelCase__ ,return_tensors=lowerCamelCase__ ,**lowerCamelCase__ )
_UpperCamelCase : Tuple = self.convert_tokens_to_ids(lowerCamelCase__ )
_UpperCamelCase : str = tgt_lang_id
return inputs
    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        src_lang: str = "eng_Latn",
        tgt_texts: Optional[List[str]] = None,
        tgt_lang: str = "fra_Latn",
        **kwargs,
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
return self.set_src_lang_special_tokens(self.src_lang )
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def UpperCamelCase_ ( self : str ,lowerCamelCase__ : List[Any] ):
'''simple docstring'''
_UpperCamelCase : int = self.convert_tokens_to_ids(lowerCamelCase__ )
if self.legacy_behaviour:
_UpperCamelCase : Optional[Any] = []
_UpperCamelCase : int = [self.eos_token_id, self.cur_lang_code]
else:
_UpperCamelCase : List[Any] = [self.cur_lang_code]
_UpperCamelCase : List[Any] = [self.eos_token_id]
_UpperCamelCase : Union[str, Any] = self.convert_ids_to_tokens(self.prefix_tokens )
_UpperCamelCase : int = self.convert_ids_to_tokens(self.suffix_tokens )
_UpperCamelCase : Any = processors.TemplateProcessing(
single=prefix_tokens_str + ['$A'] + suffix_tokens_str ,pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str ,special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str ,self.prefix_tokens + self.suffix_tokens ) ) ,)
def UpperCamelCase_ ( self : int ,lowerCamelCase__ : str ):
'''simple docstring'''
_UpperCamelCase : Any = self.convert_tokens_to_ids(lowerCamelCase__ )
if self.legacy_behaviour:
_UpperCamelCase : Tuple = []
_UpperCamelCase : str = [self.eos_token_id, self.cur_lang_code]
else:
_UpperCamelCase : Tuple = [self.cur_lang_code]
_UpperCamelCase : Optional[Any] = [self.eos_token_id]
_UpperCamelCase : int = self.convert_ids_to_tokens(self.prefix_tokens )
_UpperCamelCase : List[Any] = self.convert_ids_to_tokens(self.suffix_tokens )
_UpperCamelCase : Optional[int] = processors.TemplateProcessing(
single=prefix_tokens_str + ['$A'] + suffix_tokens_str ,pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str ,special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str ,self.prefix_tokens + self.suffix_tokens ) ) ,)
def UpperCamelCase_ ( self : Any ,lowerCamelCase__ : str ,lowerCamelCase__ : Optional[str] = None ):
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.' )
if not os.path.isdir(lowerCamelCase__ ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory.' )
return
_UpperCamelCase : List[Any] = os.path.join(
lowerCamelCase__ ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCamelCase__ ):
copyfile(self.vocab_file ,lowerCamelCase__ )
return (out_vocab_file,)
| 236 | 0 |
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_torch_available,
is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .multicontrolnet import MultiControlNetModel
from .pipeline_controlnet import StableDiffusionControlNetPipeline
    from .pipeline_controlnet_img2img import StableDiffusionControlNetImg2ImgPipeline
from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline
if is_transformers_available() and is_flax_available():
from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
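# Illustrative usage sketch of the exported pipeline (the model ids and input names
# below are assumptions for demonstration):
# from diffusers import ControlNetModel, StableDiffusionControlNetPipeline
# controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny")
# pipe = StableDiffusionControlNetPipeline.from_pretrained(
#     "runwayml/stable-diffusion-v1-5", controlnet=controlnet
# )
# image = pipe("a photo of a cat", image=canny_edges).images[0]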
| 64 |
"""simple docstring"""
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class HfFileSystem(AbstractFileSystem):
    """Interface to files in a Hugging Face repository (legacy)."""

    root_marker = ""
    protocol = "hf-legacy"  # "hf://"" is reserved for hffs

    def __init__(
        self,
        repo_info: Optional[DatasetInfo] = None,
        token: Optional[str] = None,
        **kwargs,
    ):
        super().__init__(self, **kwargs)
        self.repo_info = repo_info
        self.token = token
        self.dir_cache = None

    def _get_dirs(self):
        if self.dir_cache is None:
            self.dir_cache = {}
            for hf_file in self.repo_info.siblings:
                # TODO(QL): add sizes
                self.dir_cache[hf_file.rfilename] = {
                    "name": hf_file.rfilename,
                    "size": None,
                    "type": "file",
                }
                self.dir_cache.update(
                    {
                        str(d): {"name": str(d), "size": None, "type": "directory"}
                        for d in list(PurePosixPath(hf_file.rfilename).parents)[:-1]
                    }
                )

    def _open(self, path: str, mode: str = "rb", **kwargs):
        if not isinstance(self.repo_info, DatasetInfo):
            raise NotImplementedError(f"Open is only implemented for dataset repositories, but got {self.repo_info}")
        url = hf_hub_url(self.repo_info.id, path, revision=self.repo_info.sha)
        return fsspec.open(
            url,
            mode=mode,
            headers=get_authentication_headers_for_url(url, use_auth_token=self.token),
            client_kwargs={"trust_env": True},
        ).open()

    def info(self, path, **kwargs):
        self._get_dirs()
        path = self._strip_protocol(path)
        if path in self.dir_cache:
            return self.dir_cache[path]
        else:
            raise FileNotFoundError(path)

    def ls(self, path, detail=False, **kwargs):
        self._get_dirs()
        path = PurePosixPath(path.strip("/"))
        paths = {}
        for p, f in self.dir_cache.items():
            p = PurePosixPath(p.strip("/"))
            root = p.parent
            if root == path:
                paths[str(p)] = f
        out = list(paths.values())
        if detail:
            return out
        else:
            return sorted(f["name"] for f in out)
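

# --- Hedged usage sketch (editor's addition) ---
# Browsing a repository through the filesystem above. `repo_info` would be a
# `DatasetInfo` returned by the Hub API, so this is illustrative rather than
# runnable offline.
# fs = HfFileSystem(repo_info=repo_info, token=None)
# print(fs.ls("", detail=False))           # top-level files and directories
# with fs.open("train.csv", mode="rb") as f:
#     header = f.read(100)                 # streamed via hf_hub_url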
| 44 | 0 |
"""simple docstring"""
from collections import deque
class Process:
    def __init__(self, process_name: str, arrival_time: int, burst_time: int) -> None:
        self.process_name = process_name  # process name
        self.arrival_time = arrival_time  # arrival time of the process
        # completion time of finished process or last interrupted time
        self.stop_time = arrival_time
        self.burst_time = burst_time  # remaining burst time
        self.waiting_time = 0  # total time of the process wait in ready queue
        self.turnaround_time = 0  # time from arrival time to completion time


class MLFQ:
    def __init__(
        self,
        number_of_queues: int,
        time_slices: list[int],
        queue: deque[Process],
        current_time: int,
    ) -> None:
        # total number of mlfq's queues
        self.number_of_queues = number_of_queues
        # time slice of queues that round robin algorithm applied
        self.time_slices = time_slices
        # unfinished process is in this ready_queue
        self.ready_queue = queue
        # current time
        self.current_time = current_time
        # finished process is in this sequence queue
        self.finish_queue: deque[Process] = deque()
    def calculate_sequence_of_finish_queue(self) -> list[str]:
        sequence = []
        for i in range(len(self.finish_queue)):
            sequence.append(self.finish_queue[i].process_name)
        return sequence

    def calculate_waiting_time(self, queue: list[Process]) -> list[int]:
        waiting_times = []
        for i in range(len(queue)):
            waiting_times.append(queue[i].waiting_time)
        return waiting_times

    def calculate_turnaround_time(self, queue: list[Process]) -> list[int]:
        turnaround_times = []
        for i in range(len(queue)):
            turnaround_times.append(queue[i].turnaround_time)
        return turnaround_times

    def calculate_completion_time(self, queue: list[Process]) -> list[int]:
        completion_times = []
        for i in range(len(queue)):
            completion_times.append(queue[i].stop_time)
        return completion_times

    def calculate_remaining_burst_time_of_processes(self, queue: deque[Process]) -> list[int]:
        return [q.burst_time for q in queue]

    def update_waiting_time(self, process: Process) -> int:
        process.waiting_time += self.current_time - process.stop_time
        return process.waiting_time
    def first_come_first_served(self, ready_queue: deque[Process]) -> deque[Process]:
        finished = deque()  # sequence deque of finished process
        while len(ready_queue) != 0:
            cp = ready_queue.popleft()  # current process

            # if process's arrival time is later than current time, update current time
            if self.current_time < cp.arrival_time:
                self.current_time += cp.arrival_time

            # update waiting time of current process
            self.update_waiting_time(cp)
            # update current time
            self.current_time += cp.burst_time
            # finish the process and set the process's burst-time 0
            cp.burst_time = 0
            # set the process's turnaround time because it is finished
            cp.turnaround_time = self.current_time - cp.arrival_time
            # set the completion time
            cp.stop_time = self.current_time
            # add the process to queue that has finished queue
            finished.append(cp)

        self.finish_queue.extend(finished)  # add finished process to finish queue
        # FCFS will finish all remaining processes
        return finished
    def round_robin(self, ready_queue: deque[Process], time_slice: int) -> tuple[deque[Process], deque[Process]]:
        finished = deque()  # sequence deque of terminated process
        # just for 1 cycle and unfinished processes will go back to queue
        for _ in range(len(ready_queue)):
            cp = ready_queue.popleft()  # current process

            # if process's arrival time is later than current time, update current time
            if self.current_time < cp.arrival_time:
                self.current_time += cp.arrival_time

            # update waiting time of unfinished processes
            self.update_waiting_time(cp)
            # if the burst time of process is bigger than time-slice
            if cp.burst_time > time_slice:
                # use CPU for only time-slice
                self.current_time += time_slice
                # update remaining burst time
                cp.burst_time -= time_slice
                # update end point time
                cp.stop_time = self.current_time
                # locate the process behind the queue because it is not finished
                ready_queue.append(cp)
            else:
                # use CPU for remaining burst time
                self.current_time += cp.burst_time
                # set burst time 0 because the process is finished
                cp.burst_time = 0
                # set the finish time
                cp.stop_time = self.current_time
                # update the process' turnaround time because it is finished
                cp.turnaround_time = self.current_time - cp.arrival_time
                # add the process to queue that has finished queue
                finished.append(cp)

        self.finish_queue.extend(finished)  # add finished process to finish queue
        # return finished processes queue and remaining processes queue
        return finished, ready_queue
    def multi_level_feedback_queue(self) -> deque[Process]:
        # all queues except last one have round_robin algorithm
        for i in range(self.number_of_queues - 1):
            finished, self.ready_queue = self.round_robin(
                self.ready_queue, self.time_slices[i]
            )
        # the last queue has first_come_first_served algorithm
        self.first_come_first_served(self.ready_queue)

        return self.finish_queue
if __name__ == "__main__":
import doctest
__A =Process('''P1''', 0, 5_3)
__A =Process('''P2''', 0, 1_7)
__A =Process('''P3''', 0, 6_8)
__A =Process('''P4''', 0, 2_4)
__A =3
__A =[1_7, 2_5]
__A =deque([Pa, Pa, Pa, Pa])
if len(time_slices) != number_of_queues - 1:
raise SystemExit(0)
doctest.testmod(extraglobs={'''queue''': deque([Pa, Pa, Pa, Pa])})
__A =Process('''P1''', 0, 5_3)
__A =Process('''P2''', 0, 1_7)
__A =Process('''P3''', 0, 6_8)
__A =Process('''P4''', 0, 2_4)
__A =3
__A =[1_7, 2_5]
__A =deque([Pa, Pa, Pa, Pa])
__A =MLFQ(number_of_queues, time_slices, queue, 0)
__A =mlfq.multi_level_feedback_queue()
# print total waiting times of processes(P1, P2, P3, P4)
print(
F"""waiting time:\
\t\t\t{MLFQ.calculate_waiting_time(mlfq, [Pa, Pa, Pa, Pa])}"""
)
# print completion times of processes(P1, P2, P3, P4)
print(
F"""completion time:\
\t\t{MLFQ.calculate_completion_time(mlfq, [Pa, Pa, Pa, Pa])}"""
)
# print total turnaround times of processes(P1, P2, P3, P4)
print(
F"""turnaround time:\
\t\t{MLFQ.calculate_turnaround_time(mlfq, [Pa, Pa, Pa, Pa])}"""
)
# print sequence of finished processes
print(
F"""sequence of finished processes:\
{mlfq.calculate_sequence_of_finish_queue()}"""
)
| 358 |
INSTALL_CONTENT = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""

notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
| 47 | 0 |
'''simple docstring'''
from __future__ import annotations
def carrier_concentration(
    electron_conc: float,
    hole_conc: float,
    intrinsic_conc: float,
) -> tuple:
if (electron_conc, hole_conc, intrinsic_conc).count(0 ) != 1:
raise ValueError('''You cannot supply more or less than 2 values''' )
elif electron_conc < 0:
raise ValueError('''Electron concentration cannot be negative in a semiconductor''' )
elif hole_conc < 0:
raise ValueError('''Hole concentration cannot be negative in a semiconductor''' )
elif intrinsic_conc < 0:
raise ValueError(
'''Intrinsic concentration cannot be negative in a semiconductor''' )
elif electron_conc == 0:
return (
"electron_conc",
intrinsic_conc**2 / hole_conc,
)
elif hole_conc == 0:
return (
"hole_conc",
intrinsic_conc**2 / electron_conc,
)
elif intrinsic_conc == 0:
return (
"intrinsic_conc",
(electron_conc * hole_conc) ** 0.5,
)
else:
return (-1, -1)
if __name__ == "__main__":
import doctest
doctest.testmod()
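
    # Hedged example (editor's addition): with two known concentrations the
    # third follows from the mass-action law n * p = n_i**2; the numbers are
    # illustrative only.
    print(carrier_concentration(electron_conc=25, hole_conc=100, intrinsic_conc=0))
    # -> ('intrinsic_conc', 50.0)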
| 34 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'sayakpaul/vit-msn-base': 'https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json',
# See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}
class ViTMSNConfig(PretrainedConfig):
    model_type = "vit_msn"

    def __init__(self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072,
                 hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0,
                 initializer_range=0.02, layer_norm_eps=1e-06, image_size=224, patch_size=16, num_channels=3,
                 qkv_bias=True, **kwargs):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
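

# --- Hedged usage sketch (editor's addition) ---
# Instantiating the config with its defaults; the printed value echoes the
# `hidden_size` keyword default above.
if __name__ == "__main__":
    config = ViTMSNConfig(image_size=224, patch_size=16)
    print(config.hidden_size)  # 768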
| 24 | 0 |
'''simple docstring'''
encode_dict = {
'a': 'AAAAA',
'b': 'AAAAB',
'c': 'AAABA',
'd': 'AAABB',
'e': 'AABAA',
'f': 'AABAB',
'g': 'AABBA',
'h': 'AABBB',
'i': 'ABAAA',
'j': 'BBBAA',
'k': 'ABAAB',
'l': 'ABABA',
'm': 'ABABB',
'n': 'ABBAA',
'o': 'ABBAB',
'p': 'ABBBA',
'q': 'ABBBB',
'r': 'BAAAA',
's': 'BAAAB',
't': 'BAABA',
'u': 'BAABB',
'v': 'BBBAB',
'w': 'BABAA',
'x': 'BABAB',
'y': 'BABBA',
'z': 'BABBB',
' ': ' ',
}
decode_dict = {value: key for key, value in encode_dict.items()}
def encode(word: str) -> str:
    encoded = ""
    for letter in word.lower():
        if letter.isalpha() or letter == " ":
            encoded += encode_dict[letter]
        else:
            raise Exception("encode() accepts only letters of the alphabet and spaces")
    return encoded
def decode(coded: str) -> str:
    if set(coded) - {"A", "B", " "} != set():
        raise Exception("decode() accepts only 'A', 'B' and spaces")
    decoded = ""
    for word in coded.split():
        while len(word) != 0:
            decoded += decode_dict[word[:5]]
            word = word[5:]
        decoded += " "
    return decoded.strip()
if __name__ == "__main__":
from doctest import testmod
testmod() | 362 |
'''simple docstring'''
import re
import string
from collections import Counter
import sacrebleu
import sacremoses
from packaging import version
import datasets
_CITATION = '\n@inproceedings{xu-etal-2016-optimizing,\n title = {Optimizing Statistical Machine Translation for Text Simplification},\n authors={Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris},\n journal = {Transactions of the Association for Computational Linguistics},\n volume = {4},\n year={2016},\n url = {https://www.aclweb.org/anthology/Q16-1029},\n pages = {401--415\n},\n@inproceedings{post-2018-call,\n title = "A Call for Clarity in Reporting {BLEU} Scores",\n author = "Post, Matt",\n booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",\n month = oct,\n year = "2018",\n address = "Belgium, Brussels",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W18-6319",\n pages = "186--191",\n}\n'
_DESCRIPTION = '\\nWIKI_SPLIT is the combination of three metrics SARI, EXACT and SACREBLEU\nIt can be used to evaluate the quality of machine-generated texts.\n'
_KWARGS_DESCRIPTION = '\nCalculates sari score (between 0 and 100) given a list of source and predicted\nsentences, and a list of lists of reference sentences. It also computes the BLEU score as well as the exact match score.\nArgs:\n    sources: list of source sentences where each sentence should be a string.\n    predictions: list of predicted sentences where each sentence should be a string.\n    references: list of lists of reference sentences where each sentence should be a string.\nReturns:\n    sari: sari score\n    sacrebleu: sacrebleu score\n    exact: exact score\n\nExamples:\n    >>> sources=["About 95 species are currently accepted ."]\n    >>> predictions=["About 95 you now get in ."]\n    >>> references=[["About 95 species are currently known ."]]\n    >>> wiki_split = datasets.load_metric("wiki_split")\n    >>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references)\n    >>> print(results)\n    {\'sari\': 21.805555555555557, \'sacrebleu\': 14.535768424205482, \'exact\': 0.0}\n'
def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        regex = re.compile(r"\b(a|an|the)\b", re.UNICODE)
        return re.sub(regex, " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))
def compute_exact(a_gold, a_pred):
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))


def compute_em(predictions, references):
    scores = [any(compute_exact(ref, pred) for ref in refs) for pred, refs in zip(predictions, references)]
    return (sum(scores) / len(scores)) * 100
def SARIngram(sgrams, cgrams, rgramslist, numref):
    rgramsall = [rgram for rgrams in rgramslist for rgram in rgrams]
    rgramcounter = Counter(rgramsall)

    sgramcounter = Counter(sgrams)
    sgramcounter_rep = Counter()
    for sgram, scount in sgramcounter.items():
        sgramcounter_rep[sgram] = scount * numref

    cgramcounter = Counter(cgrams)
    cgramcounter_rep = Counter()
    for cgram, ccount in cgramcounter.items():
        cgramcounter_rep[cgram] = ccount * numref

    # KEEP
    keepgramcounter_rep = sgramcounter_rep & cgramcounter_rep
    keepgramcountergood_rep = keepgramcounter_rep & rgramcounter
    keepgramcounterall_rep = sgramcounter_rep & rgramcounter

    keeptmpscore1 = 0
    keeptmpscore2 = 0
    for keepgram in keepgramcountergood_rep:
        keeptmpscore1 += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram]
        # Fix an alleged bug [2] in the keep score computation.
        # keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram]
        keeptmpscore2 += keepgramcountergood_rep[keepgram]
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    keepscore_precision = 1
    keepscore_recall = 1
    if len(keepgramcounter_rep) > 0:
        keepscore_precision = keeptmpscore1 / len(keepgramcounter_rep)
    if len(keepgramcounterall_rep) > 0:
        # Fix an alleged bug [2] in the keep score computation.
        # keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep)
        keepscore_recall = keeptmpscore2 / sum(keepgramcounterall_rep.values())
    keepscore = 0
    if keepscore_precision > 0 or keepscore_recall > 0:
        keepscore = 2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall)

    # DELETION
    delgramcounter_rep = sgramcounter_rep - cgramcounter_rep
    delgramcountergood_rep = delgramcounter_rep - rgramcounter
    delgramcounterall_rep = sgramcounter_rep - rgramcounter
    deltmpscore1 = 0
    deltmpscore2 = 0
    for delgram in delgramcountergood_rep:
        deltmpscore1 += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram]
        deltmpscore2 += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram]
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    delscore_precision = 1
    if len(delgramcounter_rep) > 0:
        delscore_precision = deltmpscore1 / len(delgramcounter_rep)

    # ADDITION
    addgramcounter = set(cgrams) - set(sgrams)
    addgramcountergood = set(addgramcounter) & set(rgramsall)
    addgramcounterall = set(rgramsall) - set(sgrams)

    addtmpscore = 0
    for addgram in addgramcountergood:
        addtmpscore += 1
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    addscore_precision = 1
    addscore_recall = 1
    if len(addgramcounter) > 0:
        addscore_precision = addtmpscore / len(addgramcounter)
    if len(addgramcounterall) > 0:
        addscore_recall = addtmpscore / len(addgramcounterall)
    addscore = 0
    if addscore_precision > 0 or addscore_recall > 0:
        addscore = 2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall)

    return (keepscore, delscore_precision, addscore)
def SARIsent(ssent, csent, rsents):
    numref = len(rsents)

    s1grams = ssent.split(" ")
    c1grams = csent.split(" ")
    s2grams = []
    c2grams = []
    s3grams = []
    c3grams = []
    s4grams = []
    c4grams = []
    r1gramslist = []
    r2gramslist = []
    r3gramslist = []
    r4gramslist = []
    for rsent in rsents:
        r1grams = rsent.split(" ")
        r2grams = []
        r3grams = []
        r4grams = []
        r1gramslist.append(r1grams)
        for i in range(0, len(r1grams) - 1):
            if i < len(r1grams) - 1:
                r2gram = r1grams[i] + " " + r1grams[i + 1]
                r2grams.append(r2gram)
            if i < len(r1grams) - 2:
                r3gram = r1grams[i] + " " + r1grams[i + 1] + " " + r1grams[i + 2]
                r3grams.append(r3gram)
            if i < len(r1grams) - 3:
                r4gram = r1grams[i] + " " + r1grams[i + 1] + " " + r1grams[i + 2] + " " + r1grams[i + 3]
                r4grams.append(r4gram)
        r2gramslist.append(r2grams)
        r3gramslist.append(r3grams)
        r4gramslist.append(r4grams)

    for i in range(0, len(s1grams) - 1):
        if i < len(s1grams) - 1:
            s2gram = s1grams[i] + " " + s1grams[i + 1]
            s2grams.append(s2gram)
        if i < len(s1grams) - 2:
            s3gram = s1grams[i] + " " + s1grams[i + 1] + " " + s1grams[i + 2]
            s3grams.append(s3gram)
        if i < len(s1grams) - 3:
            s4gram = s1grams[i] + " " + s1grams[i + 1] + " " + s1grams[i + 2] + " " + s1grams[i + 3]
            s4grams.append(s4gram)

    for i in range(0, len(c1grams) - 1):
        if i < len(c1grams) - 1:
            c2gram = c1grams[i] + " " + c1grams[i + 1]
            c2grams.append(c2gram)
        if i < len(c1grams) - 2:
            c3gram = c1grams[i] + " " + c1grams[i + 1] + " " + c1grams[i + 2]
            c3grams.append(c3gram)
        if i < len(c1grams) - 3:
            c4gram = c1grams[i] + " " + c1grams[i + 1] + " " + c1grams[i + 2] + " " + c1grams[i + 3]
            c4grams.append(c4gram)

    (keep1score, del1score, add1score) = SARIngram(s1grams, c1grams, r1gramslist, numref)
    (keep2score, del2score, add2score) = SARIngram(s2grams, c2grams, r2gramslist, numref)
    (keep3score, del3score, add3score) = SARIngram(s3grams, c3grams, r3gramslist, numref)
    (keep4score, del4score, add4score) = SARIngram(s4grams, c4grams, r4gramslist, numref)
    avgkeepscore = sum([keep1score, keep2score, keep3score, keep4score]) / 4
    avgdelscore = sum([del1score, del2score, del3score, del4score]) / 4
    avgaddscore = sum([add1score, add2score, add3score, add4score]) / 4
    finalscore = (avgkeepscore + avgdelscore + avgaddscore) / 3
    return finalscore
def normalize(sentence, lowercase: bool = True, tokenizer: str = "13a", return_str: bool = True):
    # Normalization is required for the ASSET dataset (one of the primary
    # datasets in sentence simplification) to allow using space
    # to split the sentence. Even though Wiki-Auto and TURK datasets,
    # do not require normalization, we do it for consistency.
    # Code adapted from the EASSE library [1] written by the authors of the ASSET dataset.
    # [1] https://github.com/feralvam/easse/blob/580bba7e1378fc8289c663f864e0487188fe8067/easse/utils/preprocessing.py#L7

    if lowercase:
        sentence = sentence.lower()

    if tokenizer in ["13a", "intl"]:
        if version.parse(sacrebleu.__version__).major >= 2:
            normalized_sent = sacrebleu.metrics.bleu._get_tokenizer(tokenizer)()(sentence)
        else:
            normalized_sent = sacrebleu.TOKENIZERS[tokenizer]()(sentence)
    elif tokenizer == "moses":
        normalized_sent = sacremoses.MosesTokenizer().tokenize(sentence, return_str=True, escape=False)
    elif tokenizer == "penn":
        normalized_sent = sacremoses.MosesTokenizer().penn_tokenize(sentence, return_str=True)
    else:
        normalized_sent = sentence

    if not return_str:
        normalized_sent = normalized_sent.split()

    return normalized_sent
def compute_sari(sources, predictions, references):
    if not (len(sources) == len(predictions) == len(references)):
        raise ValueError("Sources length must match predictions and references lengths.")
    sari_score = 0
    for src, pred, refs in zip(sources, predictions, references):
        sari_score += SARIsent(normalize(src), normalize(pred), [normalize(sent) for sent in refs])
    sari_score = sari_score / len(predictions)
    return 100 * sari_score
def _UpperCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__="exp" , UpperCamelCase__=None , UpperCamelCase__=False , UpperCamelCase__=False , UpperCamelCase__=False , ):
UpperCAmelCase__ : int = len(references[0] )
if any(len(UpperCamelCase__ ) != references_per_prediction for refs in references ):
raise ValueError("""Sacrebleu requires the same number of references for each prediction""" )
UpperCAmelCase__ : int = [[refs[i] for refs in references] for i in range(UpperCamelCase__ )]
UpperCAmelCase__ : int = sacrebleu.corpus_bleu(
UpperCamelCase__ , UpperCamelCase__ , smooth_method=UpperCamelCase__ , smooth_value=UpperCamelCase__ , force=UpperCamelCase__ , lowercase=UpperCamelCase__ , use_effective_order=UpperCamelCase__ , )
return output.score
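

# --- Hedged example (editor's addition) ---
# Exercising the helpers above directly, mirroring the documented example in
# _KWARGS_DESCRIPTION; the expected value comes from that docstring.
# compute_sari(
#     sources=["About 95 species are currently accepted ."],
#     predictions=["About 95 you now get in ."],
#     references=[["About 95 species are currently known ."]],
# )
# -> 21.805555555555557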
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class WikiSplit(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence"""),
"""references""": datasets.Sequence(datasets.Value("""string""" , id="""sequence""") , id="""references"""),
}) , codebase_urls=[
"""https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py""",
"""https://github.com/cocoxu/simplification/blob/master/SARI.py""",
"""https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/sari_hook.py""",
"""https://github.com/mjpost/sacreBLEU""",
] , reference_urls=[
"""https://www.aclweb.org/anthology/Q16-1029.pdf""",
"""https://github.com/mjpost/sacreBLEU""",
"""https://en.wikipedia.org/wiki/BLEU""",
"""https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213""",
] , )
    def _compute(self, sources, predictions, references):
        result = {}
        result.update({"sari": compute_sari(sources=sources, predictions=predictions, references=references)})
        result.update({"sacrebleu": compute_sacrebleu(predictions=predictions, references=references)})
        result.update({"exact": compute_em(predictions=predictions, references=references)})
        return result | 283 | 0 |
import argparse
import shutil
from pathlib import Path
from tqdm import tqdm
from transformers import AutoTokenizer
def pack_examples(tok, src_examples, tgt_examples, max_tokens=1024):
    finished_src, finished_tgt = [], []

    sorted_examples = list(zip(src_examples, tgt_examples))
    new_src, new_tgt = sorted_examples[0]

    def is_too_big(strang):
        return tok(strang, return_tensors="pt").input_ids.shape[1] > max_tokens

    for src, tgt in tqdm(sorted_examples[1:]):
        cand_src = new_src + " " + src
        cand_tgt = new_tgt + " " + tgt
        if is_too_big(cand_src) or is_too_big(cand_tgt):  # cant fit, finalize example
            finished_src.append(new_src)
            finished_tgt.append(new_tgt)
            new_src, new_tgt = src, tgt
        else:  # can fit, keep adding
            new_src, new_tgt = cand_src, cand_tgt

    # cleanup
    if new_src:
        assert new_tgt
        finished_src.append(new_src)
        finished_tgt.append(new_tgt)
    return finished_src, finished_tgt
def pack_data_dir(tok, data_dir: Path, max_tokens, save_path):
    save_path = Path(save_path)
    save_path.mkdir(exist_ok=True)
    for split in ["train"]:
        src_path, tgt_path = data_dir / f"{split}.source", data_dir / f"{split}.target"
        src_docs = [x.rstrip() for x in Path(src_path).open().readlines()]
        tgt_docs = [x.rstrip() for x in Path(tgt_path).open().readlines()]
        packed_src, packed_tgt = pack_examples(tok, src_docs, tgt_docs, max_tokens)
        print(f"packed {split} split from {len(src_docs)} examples -> {len(packed_src)}.")
        Path(save_path / f"{split}.source").open("w").write("\n".join(packed_src))
        Path(save_path / f"{split}.target").open("w").write("\n".join(packed_tgt))
    for split in ["val", "test"]:
        src_path, tgt_path = data_dir / f"{split}.source", data_dir / f"{split}.target"
        shutil.copyfile(src_path, save_path / f"{split}.source")
        shutil.copyfile(tgt_path, save_path / f"{split}.target")


def packer_cli():
    parser = argparse.ArgumentParser()
    parser.add_argument("--tok_name", type=str, help="like facebook/bart-large-cnn,t5-base, etc.")
    parser.add_argument("--max_seq_len", type=int, default=128)
    parser.add_argument("--data_dir", type=str)
    parser.add_argument("--save_path", type=str)
    args = parser.parse_args()
    tokenizer = AutoTokenizer.from_pretrained(args.tok_name)
    return pack_data_dir(tokenizer, Path(args.data_dir), args.max_seq_len, args.save_path)
if __name__ == "__main__":
packer_cli()
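

# --- Hedged usage sketch (editor's addition) ---
# Typical invocation of the CLI above; the script name and paths are
# placeholders.
#
#   python pack_dataset.py --tok_name facebook/bart-large-cnn \
#       --max_seq_len 256 --data_dir ./cnn_dm --save_path ./cnn_dm_packed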
| 59 |
"""simple docstring"""
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
def lowercase_ ( __UpperCAmelCase ) -> tuple:
return (data["data"], data["target"])
def lowercase_ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> np.ndarray:
lowerCAmelCase__ : List[Any] = XGBRegressor(verbosity=0 , random_state=42 )
xgb.fit(__UpperCAmelCase , __UpperCAmelCase )
# Predict target for test data
lowerCAmelCase__ : Dict = xgb.predict(__UpperCAmelCase )
lowerCAmelCase__ : Any = predictions.reshape(len(__UpperCAmelCase ) , 1 )
return predictions
def lowercase_ ( ) -> None:
lowerCAmelCase__ : Optional[Any] = fetch_california_housing()
lowerCAmelCase__ , lowerCAmelCase__ : Union[str, Any] = data_handling(__UpperCAmelCase )
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : List[Any] = train_test_split(
__UpperCAmelCase , __UpperCAmelCase , test_size=0.25 , random_state=1 )
lowerCAmelCase__ : Optional[Any] = xgboost(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
# Error printing
print(f"""Mean Absolute Error : {mean_absolute_error(__UpperCAmelCase , __UpperCAmelCase )}""" )
print(f"""Mean Square Error : {mean_squared_error(__UpperCAmelCase , __UpperCAmelCase )}""" )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
| 242 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

RWKV_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"RWKV/rwkv-4-169m-pile": "https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json",
"RWKV/rwkv-4-430m-pile": "https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json",
"RWKV/rwkv-4-1b5-pile": "https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json",
"RWKV/rwkv-4-3b-pile": "https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json",
"RWKV/rwkv-4-7b-pile": "https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json",
"RWKV/rwkv-4-14b-pile": "https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json",
"RWKV/rwkv-raven-1b5": "https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json",
"RWKV/rwkv-raven-3b": "https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json",
"RWKV/rwkv-raven-7b": "https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json",
"RWKV/rwkv-raven-14b": "https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json",
}
class RwkvConfig(PretrainedConfig):
    model_type = "rwkv"
    attribute_map = {"max_position_embeddings": "context_length"}

    def __init__(self, vocab_size=50_277, context_length=1_024, hidden_size=4_096, num_hidden_layers=32,
                 attention_hidden_size=None, intermediate_size=None, layer_norm_epsilon=1e-5, bos_token_id=0,
                 eos_token_id=0, rescale_every=6, tie_word_embeddings=False, use_cache=True, **kwargs):
        self.vocab_size = vocab_size
        self.context_length = context_length
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.attention_hidden_size = attention_hidden_size if attention_hidden_size is not None else hidden_size
        self.intermediate_size = intermediate_size if intermediate_size is not None else 4 * hidden_size
        self.layer_norm_epsilon = layer_norm_epsilon
        self.rescale_every = rescale_every
        self.use_cache = use_cache

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            tie_word_embeddings=tie_word_embeddings, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs
        )
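

# --- Hedged usage sketch (editor's addition) ---
# Instantiating the config; `attention_hidden_size` falls back to
# `hidden_size` (4096 by default), as coded above.
if __name__ == "__main__":
    config = RwkvConfig(context_length=2_048)
    print(config.attention_hidden_size)  # 4096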
| 293 |
from __future__ import annotations
from decimal import Decimal
from numpy import array
def A__ ( SCREAMING_SNAKE_CASE__) -> list[list[float]]:
__snake_case: Any = Decimal
# Check if the provided matrix has 2 rows and 2 columns
# since this implementation only works for 2x2 matrices
if len(SCREAMING_SNAKE_CASE__) == 2 and len(matrix[0]) == 2 and len(matrix[1]) == 2:
# Calculate the determinant of the matrix
__snake_case: Tuple = float(
d(matrix[0][0]) * d(matrix[1][1]) - d(matrix[1][0]) * d(matrix[0][1]))
if determinant == 0:
raise ValueError("""This matrix has no inverse.""")
# Creates a copy of the matrix with swapped positions of the elements
__snake_case: Optional[int] = [[0.0, 0.0], [0.0, 0.0]]
__snake_case , __snake_case: Optional[Any] = matrix[1][1], matrix[0][0]
__snake_case , __snake_case: Union[str, Any] = -matrix[1][0], -matrix[0][1]
# Calculate the inverse of the matrix
return [
[(float(d(SCREAMING_SNAKE_CASE__)) / determinant) or 0.0 for n in row] for row in swapped_matrix
]
    elif (
        len(matrix) == 3
        and len(matrix[0]) == 3
        and len(matrix[1]) == 3
        and len(matrix[2]) == 3
    ):
        # Calculate the determinant of the matrix using Sarrus rule
        determinant = float(
            (
                (d(matrix[0][0]) * d(matrix[1][1]) * d(matrix[2][2]))
                + (d(matrix[0][1]) * d(matrix[1][2]) * d(matrix[2][0]))
                + (d(matrix[0][2]) * d(matrix[1][0]) * d(matrix[2][1]))
            )
            - (
                (d(matrix[0][2]) * d(matrix[1][1]) * d(matrix[2][0]))
                + (d(matrix[0][1]) * d(matrix[1][0]) * d(matrix[2][2]))
                + (d(matrix[0][0]) * d(matrix[1][2]) * d(matrix[2][1]))
            )
        )
        if determinant == 0:
            raise ValueError("This matrix has no inverse.")

        # Creating cofactor matrix
        cofactor_matrix = [
            [d(0.0), d(0.0), d(0.0)],
            [d(0.0), d(0.0), d(0.0)],
            [d(0.0), d(0.0), d(0.0)],
        ]
        cofactor_matrix[0][0] = (d(matrix[1][1]) * d(matrix[2][2])) - (
            d(matrix[1][2]) * d(matrix[2][1])
        )
        cofactor_matrix[0][1] = -(
            (d(matrix[1][0]) * d(matrix[2][2])) - (d(matrix[1][2]) * d(matrix[2][0]))
        )
        cofactor_matrix[0][2] = (d(matrix[1][0]) * d(matrix[2][1])) - (
            d(matrix[1][1]) * d(matrix[2][0])
        )
        cofactor_matrix[1][0] = -(
            (d(matrix[0][1]) * d(matrix[2][2])) - (d(matrix[0][2]) * d(matrix[2][1]))
        )
        cofactor_matrix[1][1] = (d(matrix[0][0]) * d(matrix[2][2])) - (
            d(matrix[0][2]) * d(matrix[2][0])
        )
        cofactor_matrix[1][2] = -(
            (d(matrix[0][0]) * d(matrix[2][1])) - (d(matrix[0][1]) * d(matrix[2][0]))
        )
        cofactor_matrix[2][0] = (d(matrix[0][1]) * d(matrix[1][2])) - (
            d(matrix[0][2]) * d(matrix[1][1])
        )
        cofactor_matrix[2][1] = -(
            (d(matrix[0][0]) * d(matrix[1][2])) - (d(matrix[0][2]) * d(matrix[1][0]))
        )
        cofactor_matrix[2][2] = (d(matrix[0][0]) * d(matrix[1][1])) - (
            d(matrix[0][1]) * d(matrix[1][0])
        )

        # Transpose the cofactor matrix (Adjoint matrix)
        adjoint_matrix = array(cofactor_matrix)
        for i in range(3):
            for j in range(3):
                adjoint_matrix[i][j] = cofactor_matrix[j][i]

        # Inverse of the matrix using the formula (1/determinant) * adjoint matrix
        inverse_matrix = array(cofactor_matrix)
        for i in range(3):
            for j in range(3):
                inverse_matrix[i][j] /= d(determinant)

        # Calculate the inverse of the matrix
        return [[float(d(n)) or 0.0 for n in row] for row in inverse_matrix]
    raise ValueError("Please provide a matrix of size 2x2 or 3x3.")
| 293 | 1 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, MBartConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import TFAutoModelForSeq2SeqLM, TFMBartForConditionalGeneration, TFMBartModel
@require_tf
class TFMBartModelTester:
    config_cls = MBartConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99,
                 hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37,
                 hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=20,
                 eos_token_id=2, pad_token_id=1, bos_token_id=0):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        inputs_dict = prepare_mbart_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFMBartModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()
        past_key_values = past_key_values[1]
def prepare_mbart_inputs_dict(
    config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None,
    head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class TFMBartModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else ()
    all_generative_model_classes = (TFMBartForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFMBartForConditionalGeneration,
            "feature-extraction": TFMBartModel,
            "summarization": TFMBartForConditionalGeneration,
            "text2text-generation": TFMBartForConditionalGeneration,
            "translation": TFMBartForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name != "FeatureExtractionPipelineTests":
            # Exception encountered when calling layer '...'
            return True

        return False

    def setUp(self):
        self.model_tester = TFMBartModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MBartConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_sentencepiece
@require_tokenizers
@require_tf
class TFMBartModelIntegrationTest(unittest.TestCase):
    src_text = [
        " UN Chief Says There Is No Military Solution in Syria",
    ]
    expected_text = [
        "Şeful ONU declară că nu există o soluţie militară în Siria",
    ]
    model_name = "facebook/mbart-large-en-ro"

    @cached_property
    def tokenizer(self):
        return AutoTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model

    def _assert_generated_batch_equal_expected(self, **tokenizer_kwargs):
        generated_words = self.translate_src_text(**tokenizer_kwargs)
        self.assertListEqual(self.expected_text, generated_words)

    def translate_src_text(self, **tokenizer_kwargs):
        model_inputs = self.tokenizer(self.src_text, **tokenizer_kwargs, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids, attention_mask=model_inputs.attention_mask, num_beams=2
        )
        generated_words = self.tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
        return generated_words

    @slow
    def test_batch_generation_en_ro(self):
        self._assert_generated_batch_equal_expected()
| 34 |
'''simple docstring'''
import argparse
import logging
import os
from pathlib import Path
from typing import Any, Dict
import pytorch_lightning as pl
from pytorch_lightning.utilities import rank_zero_info
from transformers import (
AdamW,
AutoConfig,
AutoModel,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
    AutoModelForSeq2SeqLM,
AutoModelForSequenceClassification,
AutoModelForTokenClassification,
AutoModelWithLMHead,
AutoTokenizer,
PretrainedConfig,
PreTrainedTokenizer,
)
from transformers.optimization import (
Adafactor,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)

require_version("pytorch_lightning>=1.0.4")

MODEL_MODES = {
    "base": AutoModel,
    "sequence-classification": AutoModelForSequenceClassification,
    "question-answering": AutoModelForQuestionAnswering,
    "pretraining": AutoModelForPreTraining,
    "token-classification": AutoModelForTokenClassification,
    "language-modeling": AutoModelWithLMHead,
    "summarization": AutoModelForSeq2SeqLM,
    "translation": AutoModelForSeq2SeqLM,
}


# update this and the import above to support new schedulers from transformers.optimization
arg_to_scheduler = {
    "linear": get_linear_schedule_with_warmup,
    "cosine": get_cosine_schedule_with_warmup,
    "cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup,
    "polynomial": get_polynomial_decay_schedule_with_warmup,
    # "": get_constant_schedule,              # not supported for now
    # "": get_constant_schedule_with_warmup,  # not supported for now
}
arg_to_scheduler_choices = sorted(arg_to_scheduler.keys())
arg_to_scheduler_metavar = "{" + ", ".join(arg_to_scheduler_choices) + "}"
class BaseTransformer(pl.LightningModule):
    def __init__(self, hparams: argparse.Namespace, num_labels=None, mode="base", config=None, tokenizer=None,
                 model=None, **config_kwargs):
        """Initialize a model, tokenizer and config."""
        super().__init__()
        # TODO: move to self.save_hyperparameters()
        # self.save_hyperparameters()
        # can also expand arguments into trainer signature for easier reading

        self.save_hyperparameters(hparams)
        self.step_count = 0
        self.output_dir = Path(self.hparams.output_dir)
        cache_dir = self.hparams.cache_dir if self.hparams.cache_dir else None
        if config is None:
            self.config = AutoConfig.from_pretrained(
                self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path,
                **({"num_labels": num_labels} if num_labels is not None else {}),
                cache_dir=cache_dir,
                **config_kwargs,
            )
        else:
            self.config: PretrainedConfig = config

        extra_model_params = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout")
        for p in extra_model_params:
            if getattr(self.hparams, p, None):
                assert hasattr(self.config, p), f"model config doesn't have a `{p}` attribute"
                setattr(self.config, p, getattr(self.hparams, p))

        if tokenizer is None:
            self.tokenizer = AutoTokenizer.from_pretrained(
                self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path,
                cache_dir=cache_dir,
            )
        else:
            self.tokenizer: PreTrainedTokenizer = tokenizer
        self.model_type = MODEL_MODES[mode]
        if model is None:
            self.model = self.model_type.from_pretrained(
                self.hparams.model_name_or_path,
                from_tf=bool(".ckpt" in self.hparams.model_name_or_path),
                config=self.config,
                cache_dir=cache_dir,
            )
        else:
            self.model = model
    def load_hf_checkpoint(self, *args, **kwargs):
        self.model = self.model_type.from_pretrained(*args, **kwargs)

    def get_lr_scheduler(self):
        get_schedule_func = arg_to_scheduler[self.hparams.lr_scheduler]
        scheduler = get_schedule_func(
            self.opt, num_warmup_steps=self.hparams.warmup_steps, num_training_steps=self.total_steps()
        )
        scheduler = {"scheduler": scheduler, "interval": "step", "frequency": 1}
        return scheduler

    def configure_optimizers(self):
        """Prepare optimizer and schedule (linear warmup and decay)."""
        model = self.model
        no_decay = ["bias", "LayerNorm.weight"]
        optimizer_grouped_parameters = [
            {
                "params": [
                    p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)
                ],  # check this named paramters
                "weight_decay": self.hparams.weight_decay,
            },
            {
                "params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
                "weight_decay": 0.0,
            },
        ]
        if self.hparams.adafactor:
            optimizer = Adafactor(
                optimizer_grouped_parameters, lr=self.hparams.learning_rate, scale_parameter=False, relative_step=False
            )
        else:
            optimizer = AdamW(
                optimizer_grouped_parameters, lr=self.hparams.learning_rate, eps=self.hparams.adam_epsilon
            )
        self.opt = optimizer

        scheduler = self.get_lr_scheduler()

        return [optimizer], [scheduler]
    def test_step(self, batch, batch_nb):
        return self.validation_step(batch, batch_nb)

    def test_epoch_end(self, outputs):
        return self.validation_end(outputs)

    def total_steps(self) -> int:
        """The number of total training steps that will be run. Used for lr scheduler purposes."""
        num_devices = max(1, self.hparams.gpus)  # TODO: consider num_tpu_cores
        effective_batch_size = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices
        return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs

    def setup(self, stage):
        if stage == "test":
            self.dataset_size = len(self.test_dataloader().dataset)
        else:
            self.train_loader = self.get_dataloader("train", self.hparams.train_batch_size, shuffle=True)
            self.dataset_size = len(self.train_dataloader().dataset)

    def get_dataloader(self, type_path: str, batch_size: int, shuffle: bool = False):
        raise NotImplementedError("You must implement this for your task")

    def train_dataloader(self):
        return self.train_loader

    def val_dataloader(self):
        return self.get_dataloader("dev", self.hparams.eval_batch_size, shuffle=False)

    def test_dataloader(self):
        return self.get_dataloader("test", self.hparams.eval_batch_size, shuffle=False)

    def _feature_file(self, mode):
        return os.path.join(
            self.hparams.data_dir,
            "cached_{}_{}_{}".format(
                mode,
                list(filter(None, self.hparams.model_name_or_path.split("/"))).pop(),
                str(self.hparams.max_seq_length),
            ),
        )
    @pl.utilities.rank_zero_only
    def on_save_checkpoint(self, checkpoint: Dict[str, Any]) -> None:
        save_path = self.output_dir.joinpath("best_tfmr")
        self.model.config.save_step = self.step_count
        self.model.save_pretrained(save_path)
        self.tokenizer.save_pretrained(save_path)
    @staticmethod
    def add_model_specific_args(parser, root_dir):
        parser.add_argument(
            "--model_name_or_path", default=None, type=str, required=True,
            help="Path to pretrained model or model identifier from huggingface.co/models",
        )
        parser.add_argument(
            "--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name"
        )
        parser.add_argument(
            "--tokenizer_name", default=None, type=str,
            help="Pretrained tokenizer name or path if not the same as model_name",
        )
        parser.add_argument(
            "--cache_dir", default=str(Path(root_dir).parent / "test_run" / "cache"), type=str,
            help="Where do you want to store the pre-trained models downloaded from huggingface.co",
        )
        parser.add_argument(
            "--encoder_layerdrop", type=float,
            help="Encoder layer dropout probability (Optional). Goes into model.config",
        )
        parser.add_argument(
            "--decoder_layerdrop", type=float,
            help="Decoder layer dropout probability (Optional). Goes into model.config",
        )
        parser.add_argument(
            "--dropout", type=float, help="Dropout probability (Optional). Goes into model.config",
        )
        parser.add_argument(
            "--attention_dropout", type=float,
            help="Attention dropout probability (Optional). Goes into model.config",
        )
        parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.")
        parser.add_argument(
            "--lr_scheduler", default="linear", choices=arg_to_scheduler_choices,
            metavar=arg_to_scheduler_metavar, type=str, help="Learning rate scheduler",
        )
        parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.")
        parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.")
        parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.")
        parser.add_argument("--num_workers", default=4, type=int, help="kwarg passed to DataLoader")
        parser.add_argument("--num_train_epochs", dest="max_epochs", default=3, type=int)
        parser.add_argument("--train_batch_size", default=32, type=int)
        parser.add_argument("--eval_batch_size", default=32, type=int)
        parser.add_argument("--adafactor", action="store_true")
class InitCallback(pl.Callback):
    def on_sanity_check_start(self, trainer, pl_module):
        if (
            trainer.is_global_zero and trainer.global_rank == 0
        ):  # we initialize the retriever only on master worker with RAY. In new pytorch-lightning accelorators are removed.
            pl_module.model.rag.retriever.init_retrieval()  # better to use hook functions.


class CheckParamCallback(pl.Callback):
    # check whether all the params are updated or not
    def on_after_backward(self, trainer, pl_module):
        for name, param in pl_module.model.rag.named_parameters():
            if param.grad is None:
                print(name)


class LoggingCallback(pl.Callback):
    def on_batch_end(self, trainer, pl_module):
        lr_scheduler = trainer.lr_schedulers[0]["scheduler"]
        lrs = {f"lr_group_{i}": lr for i, lr in enumerate(lr_scheduler.get_lr())}
        pl_module.logger.log_metrics(lrs)

    def on_validation_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        rank_zero_info("***** Validation results *****")
        metrics = trainer.callback_metrics
        # Log results
        for key in sorted(metrics):
            if key not in ["log", "progress_bar"]:
                rank_zero_info("{} = {}\n".format(key, str(metrics[key])))

    def on_test_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        rank_zero_info("***** Test results *****")
        metrics = trainer.callback_metrics
        # Log and save results to file
        output_test_results_file = os.path.join(pl_module.hparams.output_dir, "test_results.txt")
        with open(output_test_results_file, "w") as writer:
            for key in sorted(metrics):
                if key not in ["log", "progress_bar"]:
                    rank_zero_info("{} = {}\n".format(key, str(metrics[key])))
                    writer.write("{} = {}\n".format(key, str(metrics[key])))
def add_generic_args(parser, root_dir) -> None:
    #  To allow all pl args uncomment the following line
    #  parser = pl.Trainer.add_argparse_args(parser)
    parser.add_argument(
        "--output_dir", default=str(Path(root_dir).parent / "test_run" / "model_checkpoints"), type=str,
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    parser.add_argument(
        "--fp16", action="store_true",
        help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit",
    )
    parser.add_argument(
        "--fp16_opt_level", type=str, default="O2",
        help=(
            "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
            "See details at https://nvidia.github.io/apex/amp.html"
        ),
    )
    parser.add_argument("--n_tpu_cores", dest="tpu_cores", type=int)
    parser.add_argument("--max_grad_norm", dest="gradient_clip_val", default=1.0, type=float, help="Max gradient norm")
    parser.add_argument("--do_train", action="store_true", help="Whether to run training.")
    parser.add_argument("--do_predict", action="store_true", help="Whether to run predictions on the test set.")
    parser.add_argument(
        "--gradient_accumulation_steps", dest="accumulate_grad_batches", type=int, default=1,
        help="Number of updates steps to accumulate before performing a backward/update pass.",
    )
    parser.add_argument("--seed", type=int, default=42, help="random seed for initialization")
    parser.add_argument(
        "--data_dir", default=str(Path(root_dir).parent / "test_run" / "dummy-train-data"), type=str,
        help="The input data dir. Should contain the training files for the CoNLL-2003 NER task.",
    )
def generic_train(
    model: BaseTransformer,
    args: argparse.Namespace,
    early_stopping_callback=None,
    logger=True,  # can pass WandbLogger() here
    extra_callbacks=[],
    checkpoint_callback=None,
    logging_callback=None,
    **extra_train_kwargs,
):
    pl.seed_everything(args.seed)

    # init model
    odir = Path(model.hparams.output_dir)
    odir.mkdir(exist_ok=True)

    # add custom checkpoints
    if checkpoint_callback is None:
        checkpoint_callback = pl.callbacks.ModelCheckpoint(
            filepath=args.output_dir, prefix="checkpoint", monitor="val_loss", mode="min", save_top_k=1
        )
    if early_stopping_callback:
        extra_callbacks.append(early_stopping_callback)
    if logging_callback is None:
        logging_callback = LoggingCallback()

    train_params = {}

    if args.fp16:
        train_params["precision"] = 16

    if args.gpus > 1:
        train_params["accelerator"] = "auto"
        train_params["strategy"] = "ddp"

    train_params["accumulate_grad_batches"] = args.accumulate_grad_batches
    train_params["profiler"] = None
    train_params["devices"] = "auto"

    trainer = pl.Trainer.from_argparse_args(
        args,
        weights_summary=None,
        callbacks=[logging_callback] + extra_callbacks + [InitCallback()] + [checkpoint_callback],
        logger=logger,
        val_check_interval=1,
        num_sanity_val_steps=2,
        **train_params,
    )

    if args.do_train:
        trainer.fit(model)

    else:
        print("RAG modeling tests with new set functions successfuly executed!")
    return trainer
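

# --- Hedged usage sketch (editor's addition) ---
# How a task script typically wires the pieces above together; `MyTaskModule`
# is hypothetical and must implement get_dataloader().
# parser = argparse.ArgumentParser()
# add_generic_args(parser, os.getcwd())
# MyTaskModule.add_model_specific_args(parser, os.getcwd())
# args = parser.parse_args()
# model = MyTaskModule(args)
# trainer = generic_train(model, args)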
| 34 | 1 |
"""simple docstring"""
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""openbmb/cpm-ant-10b""": """https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt""",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""openbmb/cpm-ant-10b""": 1024,
}
def load_vocab(vocab_file):
    """Loads a vocabulary file into a dictionary."""
    vocab = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as reader:
        tokens = reader.readlines()
    for index, token in enumerate(tokens):
        token = token.rstrip("\n")
        vocab[token] = index
    return vocab
class snake_case__ ( snake_case_ ):
def __init__( self , lowerCamelCase , lowerCamelCase="<unk>" , lowerCamelCase=200 ):
__a = vocab
__a = unk_token
__a = max_input_chars_per_word
def a__ ( self , lowerCamelCase ):
__a = list(lowerCamelCase )
if len(lowerCamelCase ) > self.max_input_chars_per_word:
return [self.unk_token]
__a = 0
__a = []
while start < len(lowerCamelCase ):
__a = len(lowerCamelCase )
__a = None
while start < end:
__a = "".join(chars[start:end] )
if substr in self.vocab:
__a = substr
break
end -= 1
if cur_substr is None:
sub_tokens.append(self.unk_token )
start += 1
else:
sub_tokens.append(lowerCamelCase )
__a = end
return sub_tokens
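# Greedy longest-match illustration (toy vocab, assumed for this comment only):
# with vocab = {"un", "unha", "ppy"}, tokenize("unhappy") matches the longest
# prefix "unha" first, then "ppy", yielding ["unha", "ppy"]; a position with no
# vocab match emits self.unk_token and advances by a single character.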
class snake_case__ ( snake_case_ ):
_snake_case : Optional[int] = VOCAB_FILES_NAMES
_snake_case : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
_snake_case : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_snake_case : int = ["""input_ids""", """attention_mask"""]
_snake_case : int = False
def __init__( self , lowerCamelCase , lowerCamelCase="<d>" , lowerCamelCase="</d>" , lowerCamelCase="<s>" , lowerCamelCase="</s>" , lowerCamelCase="<pad>" , lowerCamelCase="<unk>" , lowerCamelCase="</n>" , lowerCamelCase="</_>" , lowerCamelCase="left" , **lowerCamelCase , ):
requires_backends(self , ["jieba"] )
super().__init__(
bod_token=lowerCamelCase , eod_token=lowerCamelCase , bos_token=lowerCamelCase , eos_token=lowerCamelCase , pad_token=lowerCamelCase , unk_token=lowerCamelCase , line_token=lowerCamelCase , space_token=lowerCamelCase , padding_side=lowerCamelCase , **lowerCamelCase , )
__a = bod_token
__a = eod_token
__a = load_vocab(lowerCamelCase )
__a = self.encoder[space_token]
__a = self.encoder[line_token]
del self.encoder[space_token]
del self.encoder[line_token]
__a = collections.OrderedDict(sorted(self.encoder.items() , key=lambda x : x[1] ) )
__a = {v: k for k, v in self.encoder.items()}
__a = WordpieceTokenizer(vocab=self.encoder , unk_token=self.unk_token )
@property
def a__ ( self ):
return self.encoder[self.bod_token]
@property
def a__ ( self ):
return self.encoder[self.eod_token]
@property
def a__ ( self ):
return self.encoder["\n"]
@property
def a__ ( self ):
return len(self.encoder )
def a__ ( self ):
return dict(self.encoder , **self.added_tokens_encoder )
def a__ ( self , lowerCamelCase ):
__a = []
for x in jieba.cut(lowerCamelCase , cut_all=lowerCamelCase ):
output_tokens.extend(self.wordpiece_tokenizer.tokenize(lowerCamelCase ) )
return output_tokens
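# The _decode override below first drops negative sentinel ids, then filters out
# the pad/eos/bos special tokens before delegating to the parent implementation.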
def a__ ( self , lowerCamelCase , **lowerCamelCase ):
__a = [i for i in token_ids if i >= 0]
__a = [
x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
]
return super()._decode(lowerCamelCase , **lowerCamelCase )
def a__ ( self , lowerCamelCase ):
return token in self.encoder
def a__ ( self , lowerCamelCase ):
return "".join(lowerCamelCase )
def a__ ( self , lowerCamelCase ):
return self.encoder.get(lowerCamelCase , self.encoder.get(self.unk_token ) )
def a__ ( self , lowerCamelCase ):
return self.decoder.get(lowerCamelCase , self.unk_token )
def a__ ( self , lowerCamelCase , lowerCamelCase = None ):
if os.path.isdir(lowerCamelCase ):
__a = os.path.join(
lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
else:
__a = (filename_prefix + "-" if filename_prefix else "") + save_directory
__a = 0
if " " in self.encoder:
__a = self.encoder[" "]
del self.encoder[" "]
if "\n" in self.encoder:
__a = self.encoder["\n"]
del self.encoder["\n"]
__a = collections.OrderedDict(sorted(self.encoder.items() , key=lambda x : x[1] ) )
with open(lowerCamelCase , "w" , encoding="utf-8" ) as writer:
for token, token_index in self.encoder.items():
if index != token_index:
logger.warning(
F"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
" Please check that the vocabulary is not corrupted!" )
__a = token_index
writer.write(token + "\n" )
index += 1
return (vocab_file,)
def a__ ( self , lowerCamelCase , lowerCamelCase = None ):
if token_ids_a is None:
return [self.bos_token_id] + token_ids_a
return [self.bos_token_id] + token_ids_a + [self.bos_token_id] + token_ids_a
def a__ ( self , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCamelCase , token_ids_a=lowerCamelCase , already_has_special_tokens=lowerCamelCase )
if token_ids_a is not None:
return [1] + ([0] * len(lowerCamelCase )) + [1] + ([0] * len(lowerCamelCase ))
return [1] + ([0] * len(lowerCamelCase ))
| 268 | """simple docstring"""
from __future__ import annotations
def _lowerCamelCase( a , a , a , a , a , ):
__a = len(a )
# If row is equal to the size of the board it means there is a queen in each
# row of the current board (possible_board)
if row == n:
# We convert the variable possible_board that looks like this: [1, 3, 0, 2] to
# this: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . ']
boards.append([". " * i + "Q " + ". " * (n - 1 - i) for i in possible_board] )
return
# We iterate over each column in the row to find all possible placements in that row
for col in range(a ):
# We apply what we learned previously. First we check that the current column
# does not already appear in possible_board, because a repeated column value
# means a vertical collision. Then we apply the two formulas we learned before:
#
# 45º: y - x = b, i.e. row - col = b
# 135º: y + x = b, i.e. row + col = b
#
# and we verify that the results of these two formulas do not already exist in
# their respective variables (diagonal_right_collisions, diagonal_left_collisions).
#
# If any of these checks is True there is a collision, so we continue to the
# next value in the for loop.
if (
col in possible_board
or row - col in diagonal_right_collisions
or row + col in diagonal_left_collisions
):
continue
# If there is no collision we call the dfs function again with the updated inputs
depth_first_search(
[*possible_board, col] , [*diagonal_right_collisions, row - col] , [*diagonal_left_collisions, row + col] , a , a , )
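# Worked example of the diagonal checks: a queen at (row=0, col=1) records
# 0 - 1 = -1 and 0 + 1 = 1; a later attempt at (row=2, col=3) is rejected because
# 2 - 3 = -1 is already in diagonal_right_collisions (same 45º diagonal).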
def _lowerCamelCase( a ):
__a = []
depth_first_search([] , [] , [] , a , a )
# Print all the boards
for board in boards:
for column in board:
print(a )
print("" )
print(len(a ) , "solutions were found." )
if __name__ == "__main__":
import doctest
doctest.testmod()
n_queens_solution(4)
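# For n = 4 this prints the 2 known solutions; the classic 8-queens puzzle has 92.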
| 268 | 1 |
class __a :
def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
UpperCamelCase__ : int = None
UpperCamelCase__ : Tuple = None
UpperCamelCase__ : Dict = graph
self._normalize_graph(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
UpperCamelCase__ : str = len(SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Optional[int] = None
def __lowercase ( self : Union[str, Any] , SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : Union[str, Any] ):
'''simple docstring'''
if sources is int:
UpperCamelCase__ : List[str] = [sources]
if sinks is int:
UpperCamelCase__ : Any = [sinks]
if len(SCREAMING_SNAKE_CASE ) == 0 or len(SCREAMING_SNAKE_CASE ) == 0:
return
UpperCamelCase__ : List[Any] = sources[0]
UpperCamelCase__ : Union[str, Any] = sinks[0]
# add a fake vertex if there is more
# than one source or sink
if len(SCREAMING_SNAKE_CASE ) > 1 or len(SCREAMING_SNAKE_CASE ) > 1:
UpperCamelCase__ : Any = 0
for i in sources:
max_input_flow += sum(self.graph[i] )
UpperCamelCase__ : Union[str, Any] = len(self.graph ) + 1
for room in self.graph:
room.insert(0 , 0 )
self.graph.insert(0 , [0] * size )
for i in sources:
UpperCamelCase__ : Tuple = max_input_flow
UpperCamelCase__ : Union[str, Any] = 0
UpperCamelCase__ : Optional[int] = len(self.graph ) + 1
for room in self.graph:
room.append(0 )
self.graph.append([0] * size )
for i in sinks:
UpperCamelCase__ : Optional[int] = max_input_flow
UpperCamelCase__ : int = size - 1
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
if self.maximum_flow_algorithm is None:
raise Exception("You need to set maximum flow algorithm before." )
if self.source_index is None or self.sink_index is None:
return 0
self.maximum_flow_algorithm.execute()
return self.maximum_flow_algorithm.getMaximumFlow()
def __lowercase ( self : Union[str, Any] , SCREAMING_SNAKE_CASE : Tuple ):
'''simple docstring'''
UpperCamelCase__ : Optional[int] = algorithm(self )
class __a :
def __init__( self : Any , SCREAMING_SNAKE_CASE : List[str] ):
'''simple docstring'''
UpperCamelCase__ : List[Any] = flow_network
UpperCamelCase__ : List[Any] = flow_network.verticesCount
UpperCamelCase__ : Optional[int] = flow_network.sourceIndex
UpperCamelCase__ : int = flow_network.sinkIndex
# it's just a reference, so you shouldn't change
# it in your algorithms; use a deep copy before doing that
UpperCamelCase__ : int = flow_network.graph
UpperCamelCase__ : int = False
def __lowercase ( self : List[Any] ):
'''simple docstring'''
if not self.executed:
self._algorithm()
UpperCamelCase__ : int = True
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
pass
class __a ( _lowerCAmelCase ):
def __init__( self : Optional[int] , SCREAMING_SNAKE_CASE : Optional[Any] ):
'''simple docstring'''
super().__init__(SCREAMING_SNAKE_CASE )
# use this to save your result
UpperCamelCase__ : Any = -1
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
if not self.executed:
raise Exception("You should execute algorithm before using its result!" )
return self.maximum_flow
class __a ( _lowerCAmelCase ):
def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE : Any ):
'''simple docstring'''
super().__init__(SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Dict = [[0] * self.verticies_count for i in range(self.verticies_count )]
UpperCamelCase__ : List[str] = [0] * self.verticies_count
UpperCamelCase__ : str = [0] * self.verticies_count
def __lowercase ( self : int ):
'''simple docstring'''
UpperCamelCase__ : Dict = self.verticies_count
# push initial preflow from the source along all of its outgoing edges
for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index] ):
self.preflow[self.source_index][nextvertex_index] += bandwidth
self.preflow[nextvertex_index][self.source_index] -= bandwidth
self.excesses[nextvertex_index] += bandwidth
# Relabel-to-front selection rule
UpperCamelCase__ : Optional[Any] = [
i
for i in range(self.verticies_count )
if i != self.source_index and i != self.sink_index
]
# move through list
UpperCamelCase__ : Dict = 0
while i < len(SCREAMING_SNAKE_CASE ):
UpperCamelCase__ : Any = vertices_list[i]
UpperCamelCase__ : List[Any] = self.heights[vertex_index]
self.process_vertex(SCREAMING_SNAKE_CASE )
if self.heights[vertex_index] > previous_height:
# if it was relabeled, swap elements
# and start from 0 index
vertices_list.insert(0 , vertices_list.pop(SCREAMING_SNAKE_CASE ) )
UpperCamelCase__ : List[Any] = 0
else:
i += 1
UpperCamelCase__ : int = sum(self.preflow[self.source_index] )
def __lowercase ( self : List[Any] , SCREAMING_SNAKE_CASE : List[str] ):
'''simple docstring'''
while self.excesses[vertex_index] > 0:
for neighbour_index in range(self.verticies_count ):
# if it's neighbour and current vertex is higher
if (
self.graph[vertex_index][neighbour_index]
- self.preflow[vertex_index][neighbour_index]
> 0
and self.heights[vertex_index] > self.heights[neighbour_index]
):
self.push(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
self.relabel(SCREAMING_SNAKE_CASE )
def __lowercase ( self : List[str] , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : Any ):
'''simple docstring'''
UpperCamelCase__ : Dict = min(
self.excesses[from_index] , self.graph[from_index][to_index] - self.preflow[from_index][to_index] , )
self.preflow[from_index][to_index] += preflow_delta
self.preflow[to_index][from_index] -= preflow_delta
self.excesses[from_index] -= preflow_delta
self.excesses[to_index] += preflow_delta
def __lowercase ( self : Union[str, Any] , SCREAMING_SNAKE_CASE : Any ):
'''simple docstring'''
UpperCamelCase__ : Dict = None
for to_index in range(self.verticies_count ):
if (
self.graph[vertex_index][to_index]
- self.preflow[vertex_index][to_index]
> 0
) and (min_height is None or self.heights[to_index] < min_height):
UpperCamelCase__ : List[Any] = self.heights[to_index]
if min_height is not None:
UpperCamelCase__ : int = min_height + 1
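# relabel raises the vertex height to one more than its lowest residual neighbour,
# the minimum increase that makes another push admissible; push (above) moves
# min(excess, residual capacity) units and updates both endpoints' excesses.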
if __name__ == "__main__":
lowerCamelCase : Dict =[0]
lowerCamelCase : Dict =[3]
# graph = [
# [0, 0, 4, 6, 0, 0],
# [0, 0, 5, 2, 0, 0],
# [0, 0, 0, 0, 4, 4],
# [0, 0, 0, 0, 6, 6],
# [0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0],
# ]
lowerCamelCase : Optional[Any] =[[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]
# prepare our network
lowerCamelCase : Dict =FlowNetwork(graph, entrances, exits)
# set algorithm
flow_network.set_maximum_flow_algorithm(PushRelabelExecutor)
# and calculate
lowerCamelCase : Optional[Any] =flow_network.find_maximum_flow()
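# The only augmenting path in the sample graph is 0 -> 1 -> 2 -> 3, so the
# computed maximum flow is min(7, 6, 8) = 6.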
print(F"""maximum flow is {maximum_flow}""") | 189 |
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, Transformer2DModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class a (_lowerCAmelCase , unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase : List[str] = DiTPipeline
__UpperCAmelCase : Tuple = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
__UpperCAmelCase : Any = PipelineTesterMixin.required_optional_params - {
"latents",
"num_images_per_prompt",
"callback",
"callback_steps",
}
__UpperCAmelCase : Optional[int] = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
__UpperCAmelCase : Optional[int] = False
def __snake_case ( self : Optional[int] ) -> str:
torch.manual_seed(0 )
__snake_case : Dict = Transformer2DModel(
sample_size=16 , num_layers=2 , patch_size=4 , attention_head_dim=8 , num_attention_heads=2 , in_channels=4 , out_channels=8 , attention_bias=lowerCamelCase , activation_fn="gelu-approximate" , num_embeds_ada_norm=1000 , norm_type="ada_norm_zero" , norm_elementwise_affine=lowerCamelCase , )
__snake_case : Union[str, Any] = AutoencoderKL()
__snake_case : int = DDIMScheduler()
__snake_case : Tuple = {"transformer": transformer.eval(), "vae": vae.eval(), "scheduler": scheduler}
return components
def __snake_case ( self : Optional[Any] , lowerCamelCase : Any , lowerCamelCase : Tuple=0 ) -> int:
if str(lowerCamelCase ).startswith("mps" ):
__snake_case : str = torch.manual_seed(lowerCamelCase )
else:
__snake_case : Dict = torch.Generator(device=lowerCamelCase ).manual_seed(lowerCamelCase )
__snake_case : Optional[Any] = {
"class_labels": [1],
"generator": generator,
"num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
def __snake_case ( self : str ) -> List[str]:
__snake_case : Tuple = "cpu"
__snake_case : int = self.get_dummy_components()
__snake_case : str = self.pipeline_class(**lowerCamelCase )
pipe.to(lowerCamelCase )
pipe.set_progress_bar_config(disable=lowerCamelCase )
__snake_case : Tuple = self.get_dummy_inputs(lowerCamelCase )
__snake_case : Optional[Any] = pipe(**lowerCamelCase ).images
__snake_case : List[str] = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 16, 16, 3) )
__snake_case : int = np.array([0.29_46, 0.66_01, 0.43_29, 0.32_96, 0.41_44, 0.53_19, 0.72_73, 0.50_13, 0.44_57] )
__snake_case : Optional[int] = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(lowerCamelCase , 1E-3 )
def __snake_case ( self : List[str] ) -> Tuple:
self._test_inference_batch_single_identical(relax_max_difference=lowerCamelCase , expected_max_diff=1E-3 )
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def __snake_case ( self : Tuple ) -> str:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
@require_torch_gpu
@slow
class a (unittest.TestCase ):
"""simple docstring"""
def __snake_case ( self : Optional[Any] ) -> str:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __snake_case ( self : List[Any] ) -> Any:
__snake_case : Any = torch.manual_seed(0 )
__snake_case : List[Any] = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256" )
pipe.to("cuda" )
__snake_case : Optional[int] = ["vase", "umbrella", "white shark", "white wolf"]
__snake_case : Optional[Any] = pipe.get_label_ids(lowerCamelCase )
__snake_case : List[Any] = pipe(lowerCamelCase , generator=lowerCamelCase , num_inference_steps=40 , output_type="np" ).images
for word, image in zip(lowerCamelCase , lowerCamelCase ):
__snake_case : int = load_numpy(
F'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy' )
assert np.abs((expected_image - image).max() ) < 1E-2
def __snake_case ( self : Union[str, Any] ) -> int:
__snake_case : Any = DiTPipeline.from_pretrained("facebook/DiT-XL-2-512" )
__snake_case : int = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.to("cuda" )
__snake_case : Tuple = ["vase", "umbrella"]
__snake_case : List[str] = pipe.get_label_ids(lowerCamelCase )
__snake_case : Tuple = torch.manual_seed(0 )
__snake_case : str = pipe(lowerCamelCase , generator=lowerCamelCase , num_inference_steps=25 , output_type="np" ).images
for word, image in zip(lowerCamelCase , lowerCamelCase ):
__snake_case : List[Any] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
F'/dit/{word}_512.npy' )
assert np.abs((expected_image - image).max() ) < 1E-1
| 123 | 0 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_fnet import FNetTokenizer
else:
SCREAMING_SNAKE_CASE :Optional[int] = None
SCREAMING_SNAKE_CASE :str = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE :Optional[Any] = {"""vocab_file""": """spiece.model""", """tokenizer_file""": """tokenizer.json"""}
SCREAMING_SNAKE_CASE :Optional[Any] = {
"""vocab_file""": {
"""google/fnet-base""": """https://huggingface.co/google/fnet-base/resolve/main/spiece.model""",
"""google/fnet-large""": """https://huggingface.co/google/fnet-large/resolve/main/spiece.model""",
},
"""tokenizer_file""": {
"""google/fnet-base""": """https://huggingface.co/google/fnet-base/resolve/main/tokenizer.json""",
"""google/fnet-large""": """https://huggingface.co/google/fnet-large/resolve/main/tokenizer.json""",
},
}
SCREAMING_SNAKE_CASE :int = {
"""google/fnet-base""": 512,
"""google/fnet-large""": 512,
}
SCREAMING_SNAKE_CASE :List[Any] = """▁"""
class __magic_name__ ( snake_case ):
UpperCamelCase_ :List[str] = VOCAB_FILES_NAMES
UpperCamelCase_ :str = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase_ :Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase_ :Any = ["""input_ids""", """token_type_ids"""]
UpperCamelCase_ :int = FNetTokenizer
def __init__( self , _lowercase=None , _lowercase=None , _lowercase=False , _lowercase=True , _lowercase=True , _lowercase="<unk>" , _lowercase="[SEP]" , _lowercase="<pad>" , _lowercase="[CLS]" , _lowercase="[MASK]" , **_lowercase , )-> List[Any]:
# The mask token behaves like a normal word, i.e. it includes the space before it
# and is included in the raw text; there should be a match in a non-normalized sentence.
UpperCamelCase_ = (
AddedToken(_lowercase , lstrip=_lowercase , rstrip=_lowercase , normalized=_lowercase )
if isinstance(_lowercase , _lowercase )
else mask_token
)
super().__init__(
_lowercase , tokenizer_file=_lowercase , do_lower_case=_lowercase , remove_space=_lowercase , keep_accents=_lowercase , unk_token=_lowercase , sep_token=_lowercase , pad_token=_lowercase , cls_token=_lowercase , mask_token=_lowercase , **_lowercase , )
UpperCamelCase_ = do_lower_case
UpperCamelCase_ = remove_space
UpperCamelCase_ = keep_accents
UpperCamelCase_ = vocab_file
UpperCamelCase_ = False if not self.vocab_file else True
def UpperCAmelCase_ ( self , _lowercase , _lowercase = None )-> List[int]:
UpperCamelCase_ = [self.sep_token_id]
UpperCamelCase_ = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def UpperCAmelCase_ ( self , _lowercase , _lowercase = None )-> List[int]:
UpperCamelCase_ = [self.sep_token_id]
UpperCamelCase_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def UpperCAmelCase_ ( self , _lowercase , _lowercase = None )-> Tuple[str]:
if not os.path.isdir(_lowercase ):
logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
return
UpperCamelCase_ = os.path.join(
_lowercase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_lowercase ):
copyfile(self.vocab_file , _lowercase )
return (out_vocab_file,)
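# Note (assumption based on the vocab_file check in __init__ above): copying
# spiece.model only succeeds when the tokenizer was loaded with the slow
# sentencepiece file available; for a tokenizer.json-only checkpoint,
# self.vocab_file is None and this method cannot produce a slow vocabulary.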
| 60 |
import sacrebleu as scb
from packaging import version
from sacrebleu import TER
import datasets
SCREAMING_SNAKE_CASE :Optional[int] = """\
@inproceedings{snover-etal-2006-study,
title = \"A Study of Translation Edit Rate with Targeted Human Annotation\",
author = \"Snover, Matthew and
Dorr, Bonnie and
Schwartz, Rich and
Micciulla, Linnea and
Makhoul, John\",
booktitle = \"Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers\",
month = aug # \" 8-12\",
year = \"2006\",
address = \"Cambridge, Massachusetts, USA\",
publisher = \"Association for Machine Translation in the Americas\",
url = \"https://aclanthology.org/2006.amta-papers.25\",
pages = \"223--231\",
}
@inproceedings{post-2018-call,
title = \"A Call for Clarity in Reporting {BLEU} Scores\",
author = \"Post, Matt\",
booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",
month = oct,
year = \"2018\",
address = \"Belgium, Brussels\",
publisher = \"Association for Computational Linguistics\",
url = \"https://www.aclweb.org/anthology/W18-6319\",
pages = \"186--191\",
}
"""
SCREAMING_SNAKE_CASE :Optional[int] = """\
TER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a
hypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu
(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found
here: https://github.com/jhclark/tercom.
The implementation here is slightly different from sacrebleu in terms of the required input format. The lengths of
the references and hypotheses lists need to be the same, so you may need to transpose your references compared to
sacrebleu's required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534
See the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.
"""
SCREAMING_SNAKE_CASE :Tuple = """
Produces TER scores alongside the number of edits and reference length.
Args:
predictions (list of str): The system stream (a sequence of segments).
references (list of list of str): A list of one or more reference streams (each a sequence of segments).
normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.
ignore_punct (boolean): If `True`, removes punctuation from sentences before scoring (passed to sacrebleu as `no_punct`). Defaults to `False`.
support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,
as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.
Only applies if `normalized = True`. Defaults to `False`.
case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.
Returns:
'score' (float): TER score (num_edits / sum_ref_lengths * 100)
'num_edits' (int): The cumulative number of edits
'ref_length' (float): The cumulative average reference length
Examples:
Example 1:
>>> predictions = [\"does this sentence match??\",
... \"what about this sentence?\",
... \"What did the TER metric user say to the developer?\"]
>>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],
... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"],
... [\"Your jokes are...\", \"...TERrible\"]]
>>> ter = datasets.load_metric(\"ter\")
>>> results = ter.compute(predictions=predictions,
... references=references,
... case_sensitive=True)
>>> print(results)
{'score': 150.0, 'num_edits': 15, 'ref_length': 10.0}
Example 2:
>>> predictions = [\"does this sentence match??\",
... \"what about this sentence?\"]
>>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],
... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]
>>> ter = datasets.load_metric(\"ter\")
>>> results = ter.compute(predictions=predictions,
... references=references,
... case_sensitive=True)
>>> print(results)
{'score': 62.5, 'num_edits': 5, 'ref_length': 8.0}
Example 3:
>>> predictions = [\"does this sentence match??\",
... \"what about this sentence?\"]
>>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],
... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]
>>> ter = datasets.load_metric(\"ter\")
>>> results = ter.compute(predictions=predictions,
... references=references,
... normalized=True,
... case_sensitive=True)
>>> print(results)
{'score': 57.14285714285714, 'num_edits': 6, 'ref_length': 10.5}
Example 4:
>>> predictions = [\"does this sentence match??\",
... \"what about this sentence?\"]
>>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],
... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]
>>> ter = datasets.load_metric(\"ter\")
>>> results = ter.compute(predictions=predictions,
... references=references,
... ignore_punct=True,
... case_sensitive=False)
>>> print(results)
{'score': 0.0, 'num_edits': 0, 'ref_length': 8.0}
Example 5:
>>> predictions = [\"does this sentence match??\",
... \"what about this sentence?\",
... \"What did the TER metric user say to the developer?\"]
>>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],
... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"],
... [\"Your jokes are...\", \"...TERrible\"]]
>>> ter = datasets.load_metric(\"ter\")
>>> results = ter.compute(predictions=predictions,
... references=references,
... ignore_punct=True,
... case_sensitive=False)
>>> print(results)
{'score': 100.0, 'num_edits': 10, 'ref_length': 10.0}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __magic_name__ ( datasets.Metric ):
def UpperCAmelCase_ ( self )-> Tuple:
if version.parse(scb.__version__ ) < version.parse("1.4.12" ):
raise ImportWarning(
"To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"
"You can install it with `pip install \"sacrebleu>=1.4.12\"`." )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="http://www.cs.umd.edu/~snover/tercom/" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Sequence(datasets.Value("string" , id="sequence" ) , id="references" ),
} ) , codebase_urls=["https://github.com/mjpost/sacreBLEU#ter"] , reference_urls=[
"https://github.com/jhclark/tercom",
] , )
def UpperCAmelCase_ ( self , _lowercase , _lowercase , _lowercase = False , _lowercase = False , _lowercase = False , _lowercase = False , )-> int:
UpperCamelCase_ = len(references[0] )
if any(len(refs ) != references_per_prediction for refs in references ):
raise ValueError("Sacrebleu requires the same number of references for each prediction" )
UpperCamelCase_ = [[refs[i] for refs in references] for i in range(_lowercase )]
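# The comprehension above transposes references from [prediction][reference] to
# [reference][prediction], the layout sacrebleu's corpus_score expects (one
# stream per reference set), as noted in _DESCRIPTION.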
UpperCamelCase_ = TER(
normalized=_lowercase , no_punct=_lowercase , asian_support=_lowercase , case_sensitive=_lowercase , )
UpperCamelCase_ = sb_ter.corpus_score(_lowercase , _lowercase )
return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
| 60 | 1 |
from collections import Counter
from timeit import timeit
def __UpperCamelCase ( lowerCAmelCase__ : List[str] = "" , ):
return sum(c % 2 for c in Counter(input_str.replace(''' ''' , '''''' ).lower() ).values() ) < 2
def __UpperCamelCase ( lowerCAmelCase__ : List[str] = "" ):
if len(__lowerCAmelCase ) == 0:
return True
__a : Dict = input_str.replace(''' ''' , '''''' ).lower()
# character_freq_dict: Stores the frequency of every character in the input string
__a : Tuple = {}
for character in lower_case_input_str:
__a : Tuple = character_freq_dict.get(__lowerCAmelCase , 0 ) + 1
__a : List[str] = 0
for character_count in character_freq_dict.values():
if character_count % 2:
odd_char += 1
if odd_char > 1:
return False
return True
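# Both implementations rely on the same fact: a string can be rearranged into a
# palindrome iff at most one character occurs an odd number of times, e.g.
# "aabbc" -> counts {a: 2, b: 2, c: 1} -> one odd count -> True.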
def __UpperCamelCase ( lowerCAmelCase__ : Union[str, Any] = "" ):
print('''\nFor string = ''' , __lowerCAmelCase , ''':''' )
print(
'''> can_string_be_rearranged_as_palindrome_counter()''' , '''\tans =''' , can_string_be_rearranged_as_palindrome_counter(__lowerCAmelCase ) , '''\ttime =''' , timeit(
'''z.can_string_be_rearranged_as_palindrome_counter(z.check_str)''' , setup='''import __main__ as z''' , ) , '''seconds''' , )
print(
'''> can_string_be_rearranged_as_palindrome()''' , '''\tans =''' , can_string_be_rearranged_as_palindrome(__lowerCAmelCase ) , '''\ttime =''' , timeit(
'''z.can_string_be_rearranged_as_palindrome(z.check_str)''' , setup='''import __main__ as z''' , ) , '''seconds''' , )
if __name__ == "__main__":
lowercase__ =input(
'Enter string to determine if it can be rearranged as a palindrome or not: '
).strip()
benchmark(check_str)
lowercase__ =can_string_be_rearranged_as_palindrome_counter(check_str)
print(F"""{check_str} can {"" if status else "not "}be rearranged as a palindrome""")
| 216 |
import unittest
from transformers import AlbertTokenizer, AlbertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
_a = get_tests_dir('''fixtures/spiece.model''')
@require_sentencepiece
@require_tokenizers
class __lowerCamelCase ( snake_case__ , unittest.TestCase):
"""simple docstring"""
UpperCamelCase__ = AlbertTokenizer
UpperCamelCase__ = AlbertTokenizerFast
UpperCamelCase__ = True
UpperCamelCase__ = True
UpperCamelCase__ = True
def UpperCamelCase ( self ):
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
_UpperCAmelCase = AlbertTokenizer(UpperCAmelCase )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCamelCase ( self , UpperCAmelCase ):
"""simple docstring"""
_UpperCAmelCase = 'this is a test'
_UpperCAmelCase = 'this is a test'
return input_text, output_text
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = '<pad>'
_UpperCAmelCase = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCAmelCase ) , UpperCAmelCase )
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<pad>' )
self.assertEqual(vocab_keys[1] , '<unk>' )
self.assertEqual(vocab_keys[-1] , '▁eloquent' )
self.assertEqual(len(UpperCAmelCase ) , 3_0000 )
def UpperCamelCase ( self ):
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 3_0000 )
def UpperCamelCase ( self ):
"""simple docstring"""
if not self.test_rust_tokenizer:
return
_UpperCAmelCase = self.get_tokenizer()
_UpperCAmelCase = self.get_rust_tokenizer()
_UpperCAmelCase = 'I was born in 92000, and this is falsé.'
_UpperCAmelCase = tokenizer.tokenize(UpperCAmelCase )
_UpperCAmelCase = rust_tokenizer.tokenize(UpperCAmelCase )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
_UpperCAmelCase = tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
_UpperCAmelCase = rust_tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
_UpperCAmelCase = self.get_rust_tokenizer()
_UpperCAmelCase = tokenizer.encode(UpperCAmelCase )
_UpperCAmelCase = rust_tokenizer.encode(UpperCAmelCase )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = AlbertTokenizer(UpperCAmelCase , keep_accents=UpperCAmelCase )
_UpperCAmelCase = tokenizer.tokenize('This is a test' )
self.assertListEqual(UpperCAmelCase , ['▁this', '▁is', '▁a', '▁test'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase ) , [48, 25, 21, 1289] )
_UpperCAmelCase = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
UpperCAmelCase , ['▁i', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', 'é', '.'] )
_UpperCAmelCase = tokenizer.convert_tokens_to_ids(UpperCAmelCase )
self.assertListEqual(UpperCAmelCase , [31, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9] )
_UpperCAmelCase = tokenizer.convert_ids_to_tokens(UpperCAmelCase )
self.assertListEqual(
UpperCAmelCase , ['▁i', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '.'] , )
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = AlbertTokenizer(UpperCAmelCase )
_UpperCAmelCase = tokenizer.encode('sequence builders' )
_UpperCAmelCase = tokenizer.encode('multi-sequence build' )
_UpperCAmelCase = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase )
_UpperCAmelCase = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase , UpperCAmelCase )
assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
tokenizer.sep_token_id
]
@slow
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = {'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'input_ids': [[2, 2_1970, 13, 5, 6092, 167, 28, 7103, 2153, 673, 8, 7028, 1_2051, 18, 17, 7103, 2153, 673, 8, 3515, 1_8684, 8, 4461, 6, 1927, 297, 8, 1_2060, 2607, 18, 13, 5, 4461, 15, 1_0538, 38, 8, 135, 15, 822, 58, 15, 993, 1_0363, 15, 1460, 8005, 4461, 15, 993, 255, 2328, 9, 9, 9, 6, 26, 1112, 816, 3260, 13, 5, 103, 2377, 6, 17, 1112, 816, 2782, 13, 5, 103, 1_0641, 6, 29, 84, 2512, 2430, 782, 1_8684, 2761, 19, 808, 2430, 2556, 17, 855, 1480, 9477, 4091, 128, 1_1712, 15, 7103, 2153, 673, 17, 2_4883, 9990, 9, 3], [2, 1_1502, 25, 1006, 20, 782, 8, 1_1809, 855, 1732, 1_9393, 1_8667, 37, 367, 2_1018, 69, 1854, 34, 1_1860, 1_9124, 27, 156, 225, 17, 193, 4141, 19, 65, 9124, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 14, 2231, 886, 2385, 1_7659, 84, 14, 1_6792, 1952, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=UpperCAmelCase , model_name='albert-base-v2' , revision='6b6560eaf5ff2e250b00c50f380c5389a9c2d82e' , )
| 39 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__a: str = logging.get_logger(__name__)
__a: str = {
"""unc-nlp/lxmert-base-uncased""": """https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json""",
}
class UpperCAmelCase ( a__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = "lxmert"
SCREAMING_SNAKE_CASE = {}
def __init__( self , __lowerCAmelCase=30522 , __lowerCAmelCase=768 , __lowerCAmelCase=12 , __lowerCAmelCase=9500 , __lowerCAmelCase=1600 , __lowerCAmelCase=400 , __lowerCAmelCase=3072 , __lowerCAmelCase="gelu" , __lowerCAmelCase=0.1 , __lowerCAmelCase=0.1 , __lowerCAmelCase=512 , __lowerCAmelCase=2 , __lowerCAmelCase=0.0_2 , __lowerCAmelCase=1E-12 , __lowerCAmelCase=9 , __lowerCAmelCase=5 , __lowerCAmelCase=5 , __lowerCAmelCase=2048 , __lowerCAmelCase=4 , __lowerCAmelCase=6.6_7 , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=True , **__lowerCAmelCase , ) -> Optional[Any]:
lowercase__ : Optional[Any] = vocab_size
lowercase__ : Any = hidden_size
lowercase__ : int = num_attention_heads
lowercase__ : str = hidden_act
lowercase__ : Optional[int] = intermediate_size
lowercase__ : Optional[Any] = hidden_dropout_prob
lowercase__ : Dict = attention_probs_dropout_prob
lowercase__ : str = max_position_embeddings
lowercase__ : str = type_vocab_size
lowercase__ : Dict = initializer_range
lowercase__ : List[str] = layer_norm_eps
lowercase__ : List[str] = num_qa_labels
lowercase__ : Any = num_object_labels
lowercase__ : List[str] = num_attr_labels
lowercase__ : Optional[int] = l_layers
lowercase__ : Optional[Any] = x_layers
lowercase__ : List[str] = r_layers
lowercase__ : List[Any] = visual_feat_dim
lowercase__ : Optional[Any] = visual_pos_dim
lowercase__ : List[Any] = visual_loss_normalizer
lowercase__ : int = task_matched
lowercase__ : int = task_mask_lm
lowercase__ : List[str] = task_obj_predict
lowercase__ : Dict = task_qa
lowercase__ : Union[str, Any] = visual_obj_loss
lowercase__ : Any = visual_attr_loss
lowercase__ : Union[str, Any] = visual_feat_loss
lowercase__ : List[Any] = {'''vision''': r_layers, '''cross_encoder''': x_layers, '''language''': l_layers}
super().__init__(**__lowerCAmelCase )
| 214 | '''simple docstring'''
def __UpperCamelCase ( UpperCAmelCase ):
def merge(UpperCAmelCase , UpperCAmelCase ) -> list:
def _merge():
while left and right:
yield (left if left[0] <= right[0] else right).pop(0 )
yield from left
yield from right
return list(_merge() )
if len(UpperCAmelCase ) <= 1:
return collection
lowercase__ : int = len(UpperCAmelCase ) // 2
return merge(merge_sort(collection[:mid] ) , merge_sort(collection[mid:] ) )
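# Example: merge_sort([5, 3, 1, 4]) recursively sorts the halves into [3, 5] and
# [1, 4], then _merge() interleaves them by repeatedly popping the smaller head,
# producing [1, 3, 4, 5].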
if __name__ == "__main__":
import doctest
doctest.testmod()
__a: str = input("""Enter numbers separated by a comma:\n""").strip()
__a: Optional[int] = [int(item) for item in user_input.split(""",""")]
print(*merge_sort(unsorted), sep=""",""")
| 214 | 1 |
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
snake_case__ : int = logging.get_logger(__name__)
snake_case__ : str = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt'}
snake_case__ : Any = {
'vocab_file': {
'allenai/longformer-base-4096': 'https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json',
'allenai/longformer-large-4096': (
'https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json'
),
'allenai/longformer-large-4096-finetuned-triviaqa': (
'https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json'
),
'allenai/longformer-base-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json'
),
'allenai/longformer-large-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json'
),
},
'merges_file': {
'allenai/longformer-base-4096': 'https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt',
'allenai/longformer-large-4096': (
'https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt'
),
'allenai/longformer-large-4096-finetuned-triviaqa': (
'https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt'
),
'allenai/longformer-base-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt'
),
'allenai/longformer-large-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt'
),
},
}
snake_case__ : Optional[Any] = {
'allenai/longformer-base-4096': 4096,
'allenai/longformer-large-4096': 4096,
'allenai/longformer-large-4096-finetuned-triviaqa': 4096,
'allenai/longformer-base-4096-extra.pos.embd.only': 4096,
'allenai/longformer-large-4096-extra.pos.embd.only': 4096,
}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def _a ( ) -> Tuple:
'''simple docstring'''
__A = (
list(range(ord('''!''' ) , ord('''~''' ) + 1 ) ) + list(range(ord('''¡''' ) , ord('''¬''' ) + 1 ) ) + list(range(ord('''®''' ) , ord('''ÿ''' ) + 1 ) )
)
__A = bs[:]
__A = 0
for b in range(2**8 ):
if b not in bs:
bs.append(lowerCamelCase )
cs.append(2**8 + n )
n += 1
__A = [chr(lowerCamelCase ) for n in cs]
return dict(zip(lowerCamelCase , lowerCamelCase ) )
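# Example: byte 0x20 (space) is not in the printable ranges kept above, so it is
# remapped to chr(256 + 32) = "Ġ", which is why GPT-2/RoBERTa-style vocabularies
# use "Ġ" as a leading-space marker.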
def _a ( lowerCamelCase: Dict ) -> Any:
'''simple docstring'''
__A = set()
__A = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
__A = char
return pairs
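# Example: get_pairs(("h", "e", "l", "l", "o")) returns
# {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}; the bpe() method below then
# repeatedly merges the lowest-ranked pair until no mergeable pair remains.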
class A_ ( _lowerCamelCase ):
lowerCAmelCase__ = VOCAB_FILES_NAMES
lowerCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase__ = ["""input_ids""", """attention_mask"""]
def __init__(self :Optional[int] , _UpperCamelCase :Optional[int] , _UpperCamelCase :Tuple , _UpperCamelCase :Tuple="replace" , _UpperCamelCase :Optional[Any]="<s>" , _UpperCamelCase :Any="</s>" , _UpperCamelCase :Dict="</s>" , _UpperCamelCase :Optional[int]="<s>" , _UpperCamelCase :List[Any]="<unk>" , _UpperCamelCase :Tuple="<pad>" , _UpperCamelCase :Union[str, Any]="<mask>" , _UpperCamelCase :Any=False , **_UpperCamelCase :List[str] , )-> Union[str, Any]:
__A = AddedToken(_UpperCamelCase , lstrip=_UpperCamelCase , rstrip=_UpperCamelCase ) if isinstance(_UpperCamelCase , _UpperCamelCase ) else bos_token
__A = AddedToken(_UpperCamelCase , lstrip=_UpperCamelCase , rstrip=_UpperCamelCase ) if isinstance(_UpperCamelCase , _UpperCamelCase ) else eos_token
__A = AddedToken(_UpperCamelCase , lstrip=_UpperCamelCase , rstrip=_UpperCamelCase ) if isinstance(_UpperCamelCase , _UpperCamelCase ) else sep_token
__A = AddedToken(_UpperCamelCase , lstrip=_UpperCamelCase , rstrip=_UpperCamelCase ) if isinstance(_UpperCamelCase , _UpperCamelCase ) else cls_token
__A = AddedToken(_UpperCamelCase , lstrip=_UpperCamelCase , rstrip=_UpperCamelCase ) if isinstance(_UpperCamelCase , _UpperCamelCase ) else unk_token
__A = AddedToken(_UpperCamelCase , lstrip=_UpperCamelCase , rstrip=_UpperCamelCase ) if isinstance(_UpperCamelCase , _UpperCamelCase ) else pad_token
# The mask token behaves like a normal word, i.e. it includes the space before it
__A = AddedToken(_UpperCamelCase , lstrip=_UpperCamelCase , rstrip=_UpperCamelCase ) if isinstance(_UpperCamelCase , _UpperCamelCase ) else mask_token
super().__init__(
errors=_UpperCamelCase , bos_token=_UpperCamelCase , eos_token=_UpperCamelCase , unk_token=_UpperCamelCase , sep_token=_UpperCamelCase , cls_token=_UpperCamelCase , pad_token=_UpperCamelCase , mask_token=_UpperCamelCase , add_prefix_space=_UpperCamelCase , **_UpperCamelCase , )
with open(_UpperCamelCase , encoding='''utf-8''' ) as vocab_handle:
__A = json.load(_UpperCamelCase )
__A = {v: k for k, v in self.encoder.items()}
__A = errors # how to handle errors in decoding
__A = bytes_to_unicode()
__A = {v: k for k, v in self.byte_encoder.items()}
with open(_UpperCamelCase , encoding='''utf-8''' ) as merges_handle:
__A = merges_handle.read().split('''\n''' )[1:-1]
__A = [tuple(merge.split() ) for merge in bpe_merges]
__A = dict(zip(_UpperCamelCase , range(len(_UpperCamelCase ) ) ) )
__A = {}
__A = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
__A = re.compile(R'''\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+''' )
@property
def _lowerCAmelCase (self :Tuple )-> Tuple:
return len(self.encoder )
def _lowerCAmelCase (self :Optional[int] )-> Optional[int]:
return dict(self.encoder , **self.added_tokens_encoder )
def _lowerCAmelCase (self :Dict , _UpperCamelCase :Tuple )-> Optional[int]:
if token in self.cache:
return self.cache[token]
__A = tuple(_UpperCamelCase )
__A = get_pairs(_UpperCamelCase )
if not pairs:
return token
while True:
__A = min(_UpperCamelCase , key=lambda _UpperCamelCase : self.bpe_ranks.get(_UpperCamelCase , float('''inf''' ) ) )
if bigram not in self.bpe_ranks:
break
__A , __A = bigram
__A = []
__A = 0
while i < len(_UpperCamelCase ):
try:
__A = word.index(_UpperCamelCase , _UpperCamelCase )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
__A = j
if word[i] == first and i < len(_UpperCamelCase ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
__A = tuple(_UpperCamelCase )
__A = new_word
if len(_UpperCamelCase ) == 1:
break
else:
__A = get_pairs(_UpperCamelCase )
__A = ''' '''.join(_UpperCamelCase )
__A = word
return word
def _lowerCAmelCase (self :Dict , _UpperCamelCase :List[str] )-> Dict:
__A = []
for token in re.findall(self.pat , _UpperCamelCase ):
__A = ''''''.join(
self.byte_encoder[b] for b in token.encode('''utf-8''' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(_UpperCamelCase ).split(''' ''' ) )
return bpe_tokens
def _lowerCAmelCase (self :List[Any] , _UpperCamelCase :Dict )-> Any:
return self.encoder.get(_UpperCamelCase , self.encoder.get(self.unk_token ) )
def _lowerCAmelCase (self :Any , _UpperCamelCase :Optional[int] )-> str:
return self.decoder.get(_UpperCamelCase )
def _lowerCAmelCase (self :Tuple , _UpperCamelCase :Tuple )-> Any:
__A = ''''''.join(_UpperCamelCase )
__A = bytearray([self.byte_decoder[c] for c in text] ).decode('''utf-8''' , errors=self.errors )
return text
def _lowerCAmelCase (self :Optional[int] , _UpperCamelCase :str , _UpperCamelCase :Optional[str] = None )-> Tuple[str]:
if not os.path.isdir(_UpperCamelCase ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
__A = os.path.join(
_UpperCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
__A = os.path.join(
_UpperCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
with open(_UpperCamelCase , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=_UpperCamelCase , ensure_ascii=_UpperCamelCase ) + '''\n''' )
__A = 0
with open(_UpperCamelCase , '''w''' , encoding='''utf-8''' ) as writer:
writer.write('''#version: 0.2\n''' )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
''' Please check that the tokenizer is not corrupted!''' )
__A = token_index
writer.write(''' '''.join(_UpperCamelCase ) + '''\n''' )
index += 1
return vocab_file, merge_file
def _lowerCAmelCase (self :Optional[int] , _UpperCamelCase :List[int] , _UpperCamelCase :Optional[List[int]] = None )-> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
__A = [self.cls_token_id]
__A = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def _lowerCAmelCase (self :Optional[int] , _UpperCamelCase :List[int] , _UpperCamelCase :Optional[List[int]] = None , _UpperCamelCase :bool = False )-> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_UpperCamelCase , token_ids_a=_UpperCamelCase , already_has_special_tokens=_UpperCamelCase )
if token_ids_a is None:
return [1] + ([0] * len(_UpperCamelCase )) + [1]
return [1] + ([0] * len(_UpperCamelCase )) + [1, 1] + ([0] * len(_UpperCamelCase )) + [1]
def _lowerCAmelCase (self :List[Any] , _UpperCamelCase :List[int] , _UpperCamelCase :Optional[List[int]] = None )-> List[int]:
__A = [self.sep_token_id]
__A = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _lowerCAmelCase (self :List[str] , _UpperCamelCase :str , _UpperCamelCase :int=False , **_UpperCamelCase :Union[str, Any] )-> Optional[int]:
__A = kwargs.pop('''add_prefix_space''' , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(_UpperCamelCase ) > 0 and not text[0].isspace()):
__A = ''' ''' + text
return (text, kwargs)
| 117 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
snake_case__ : Optional[Any] = {'configuration_ibert': ['IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'IBertConfig', 'IBertOnnxConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case__ : List[str] = [
'IBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'IBertForMaskedLM',
'IBertForMultipleChoice',
'IBertForQuestionAnswering',
'IBertForSequenceClassification',
'IBertForTokenClassification',
'IBertModel',
'IBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_ibert import IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, IBertConfig, IBertOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ibert import (
IBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
IBertForMaskedLM,
IBertForMultipleChoice,
IBertForQuestionAnswering,
IBertForSequenceClassification,
IBertForTokenClassification,
IBertModel,
IBertPreTrainedModel,
)
else:
import sys
snake_case__ : Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 117 | 1 |
"""simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@property
def __lowerCAmelCase ( self ) -> str:
torch.manual_seed(0 )
lowerCAmelCase_ :int = UNet2DModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("""DownBlock2D""", """AttnDownBlock2D""") , up_block_types=("""AttnUpBlock2D""", """UpBlock2D""") , )
return model
def __lowerCAmelCase ( self ) -> Optional[Any]:
lowerCAmelCase_ :int = self.dummy_uncond_unet
lowerCAmelCase_ :Tuple = ScoreSdeVeScheduler()
lowerCAmelCase_ :Optional[int] = ScoreSdeVePipeline(unet=__A , scheduler=__A )
sde_ve.to(__A )
sde_ve.set_progress_bar_config(disable=__A )
lowerCAmelCase_ :Tuple = torch.manual_seed(0 )
lowerCAmelCase_ :Dict = sde_ve(num_inference_steps=2 , output_type="""numpy""" , generator=__A ).images
lowerCAmelCase_ :Tuple = torch.manual_seed(0 )
lowerCAmelCase_ :Union[str, Any] = sde_ve(num_inference_steps=2 , output_type="""numpy""" , generator=__A , return_dict=__A )[
0
]
lowerCAmelCase_ :int = image[0, -3:, -3:, -1]
lowerCAmelCase_ :Optional[int] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
lowerCAmelCase_ :Optional[Any] = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def __lowerCAmelCase ( self ) -> Dict:
lowerCAmelCase_ :Union[str, Any] = """google/ncsnpp-church-256"""
lowerCAmelCase_ :Optional[int] = UNet2DModel.from_pretrained(__A )
lowerCAmelCase_ :str = ScoreSdeVeScheduler.from_pretrained(__A )
lowerCAmelCase_ :Union[str, Any] = ScoreSdeVePipeline(unet=__A , scheduler=__A )
sde_ve.to(__A )
sde_ve.set_progress_bar_config(disable=__A )
lowerCAmelCase_ :List[str] = torch.manual_seed(0 )
lowerCAmelCase_ :Optional[Any] = sde_ve(num_inference_steps=10 , output_type="""numpy""" , generator=__A ).images
lowerCAmelCase_ :List[str] = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
lowerCAmelCase_ :List[Any] = np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 1 |
"""simple docstring"""
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.local_sgd import LocalSGD
########################################################################
# This is a fully working simple example to use Accelerate
# with LocalSGD, which is a method to synchronize model
# parameters every K batches. It is different, but complementary
# to gradient accumulation.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
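# LocalSGD in a nutshell (sketch of the accelerate.local_sgd API imported above):
# inside `with LocalSGD(...) as local_sgd`, each process runs `local_sgd_steps`
# optimizer steps on its own batches, and parameters are only averaged across
# processes when `local_sgd.step()` reaches that boundary, trading per-step
# communication for periodic synchronization.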
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    """
    Creates a set of `DataLoader`s for the `glue` dataset, using "bert-base-cased" as the tokenizer.
    """
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples, padding="longest", max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
__UpperCAmelCase = mocked_dataloaders # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps)
    local_sgd_steps = int(args.local_sgd_steps)
    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=gradient_accumulation_steps
    )
    if accelerator.distributed_type not in [DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU]:
        raise NotImplementedError("LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)")
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])

    metric = evaluate.load("glue", "mrpc")

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer, num_warmup_steps=100, num_training_steps=(len(train_dataloader) * num_epochs),
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        with LocalSGD(
            accelerator=accelerator, model=model, local_sgd_steps=local_sgd_steps, enabled=local_sgd_steps is not None
        ) as local_sgd:
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                # New code #
                # We use the new `accumulate` context manager to perform gradient accumulation
                # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
                with accelerator.accumulate(model):
                    output = model(**batch)
                    loss = output.loss
                    accelerator.backward(loss)
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()
                    # LocalSGD-specific line
                    local_sgd.step()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions, references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    # New Code #
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="The number of minibatches to be ran before gradients are accumulated.",
    )
    parser.add_argument(
        "--local_sgd_steps", type=int, default=8, help="Number of local SGD steps or None to disable local SGD"
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
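# Example launch (illustrative; the flags correspond to the argparse options above):
#   accelerate launch local_sgd.py --local_sgd_steps 8 --gradient_accumulation_steps 1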
| 1 | 1 |
'''simple docstring'''
from math import pi
def arc_length(angle: int, radius: int) -> float:
    # Length of the arc subtending `angle` degrees on a circle of radius `radius`.
    return 2 * pi * radius * (angle / 360)


if __name__ == "__main__":
    print(arc_length(90, 10))
| 58 |
"""simple docstring"""
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
NON_ALPHA = re.compile("[^A-Za-z_0-9]")
# parameters used in DuplicationIndex
MIN_NUM_TOKENS = 10
NUM_PERM = 256
def get_min_hash(tokens: List[str]) -> Optional[MinHash]:
    """Compute the MinHash of a code snippet."""
    if len(tokens) < MIN_NUM_TOKENS:
        return None
    min_hash = MinHash(num_perm=NUM_PERM)
    for token in set(tokens):
        min_hash.update(token.encode())
    return min_hash


def get_tokens(code: str) -> Set[str]:
    """Tokenize a code snippet."""
    return {t for t in NON_ALPHA.split(code) if len(t.strip()) > 0}
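# Quick illustration (assumed token lists, not part of the pipeline): MinHash
# approximates the Jaccard similarity of two token sets, which DuplicationIndex
# below exploits to bucket near-duplicate files. The estimate is noisy but close;
# the true Jaccard similarity of the two sets here is 3 / 5 = 0.6.
def _minhash_demo() -> float:
    min_hash_a = get_min_hash(["def", "foo", "return", "1"] * 3)
    min_hash_b = get_min_hash(["def", "foo", "return", "2"] * 3)
    return min_hash_a.jaccard(min_hash_b)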
class DuplicationIndex:
    def __init__(
        self,
        *,
        duplication_jaccard_threshold: float = 0.85,
    ):
        self._duplication_jaccard_threshold = duplication_jaccard_threshold
        self._num_perm = NUM_PERM
        self._index = MinHashLSH(threshold=self._duplication_jaccard_threshold, num_perm=self._num_perm)

        self._duplicate_clusters = defaultdict(set)

    def add(self, code_key: Tuple, min_hash: MinHash) -> None:
        """Add a key to the MinHashLSH index, querying it first so that close matches
        extend the existing duplicate clusters."""
        close_duplicates = self._index.query(min_hash)
        if code_key in self._index.keys:
            print(f"Duplicate key {code_key}")
            return

        self._index.insert(code_key, min_hash)
        if len(close_duplicates) > 0:
            for base_duplicate in close_duplicates:
                if base_duplicate in self._duplicate_clusters:
                    self._duplicate_clusters[base_duplicate].add(code_key)
                    break
            else:
                self._duplicate_clusters[close_duplicates[0]].add(code_key)

    def get_duplicate_clusters(self) -> List[List[Dict]]:
        duplicate_clusters = []
        for base, duplicates in self._duplicate_clusters.items():
            cluster = [base] + list(duplicates)
            # reformat the cluster to be a list of dict
            cluster = [{"base_index": el[0], "repo_name": el[1], "path": el[2]} for el in cluster]
            duplicate_clusters.append(cluster)
        return duplicate_clusters

    def save(self, filepath) -> None:
        duplicate_clusters = self.get_duplicate_clusters()
        with open(filepath, "w") as f:
            json.dump(duplicate_clusters, f)
def _compute_min_hash(element):
    index, data = element
    min_hash = get_min_hash([t for t in NON_ALPHA.split(data["content"]) if len(t.strip()) > 0])
    if min_hash is not None:
        return (index, data["repo_name"], data["path"]), min_hash


def minhash_iter(dataset_iterator: Type[Dataset]):
    with mp.Pool() as pool:
        for data in pool.imap_unordered(
            _compute_min_hash, ThreadedIterator(dataset_iterator, max_queue_size=10000), chunksize=100,
        ):
            if data is not None:
                yield data


def make_duplicate_clusters(dataset_iterator: Type[Dataset], jaccard_threshold: float):
    """Find duplicate clusters: compute a MinHash per file, then feed them to a
    MinHashLSH index to detect near-duplicates."""
    di = DuplicationIndex(duplication_jaccard_threshold=jaccard_threshold)

    for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(dataset_iterator)), max_queue_size=100)):
        di.add(filename, min_hash)

    # Returns a List[Cluster] where Cluster is List[str] with the filenames.
    return di.get_duplicate_clusters()


def jaccard_similarity(code1: str, code2: str) -> float:
    """Compute the Jaccard similarity of the token sets of two code snippets."""
    tokens1 = get_tokens(code1)
    tokens2 = get_tokens(code2)
    return len(tokens1 & tokens2) / len(tokens1 | tokens2)
_shared_dataset = None


def _find_cluster_extremes_shared(cluster, jaccard_threshold):
    """Reduce a cluster to its 'extremes': every file in the cluster is similar to
    at least one kept file, and each kept file records how many copies it covers."""
    extremes = []
    for element1 in cluster:
        code1 = _shared_dataset[element1["base_index"]]["content"]
        for element2 in extremes:
            code2 = _shared_dataset[element2["base_index"]]["content"]
            if jaccard_similarity(code1, code2) >= jaccard_threshold:
                element2["copies"] += 1
                break
        else:
            element1["copies"] = 1
            extremes.append(element1)
    return extremes


def find_extremes(cluster_list, dataset, jaccard_threshold):
    """Run _find_cluster_extremes_shared over all clusters in parallel, sharing the
    dataset with the worker processes through a module-level global."""
    global _shared_dataset
    _shared_dataset = dataset
    extremes_list = []
    f = partial(_find_cluster_extremes_shared, jaccard_threshold=jaccard_threshold)
    with mp.Pool() as pool:
        for extremes in tqdm(
            pool.imap_unordered(f, cluster_list),
            total=len(cluster_list),
        ):
            extremes_list.append(extremes)
    return extremes_list


def deduplicate_dataset(
    dataset: Type[Dataset], jaccard_threshold: float = 0.85
) -> Tuple[Type[Dataset], List[List[Dict]]]:
    """Deduplicate the dataset; return the filtered dataset and the duplicate clusters."""
    duplicate_clusters = make_duplicate_clusters(dataset, jaccard_threshold)
    duplicate_indices = {x["base_index"] for cluster in duplicate_clusters for x in cluster}
    extreme_dict = {}
    extremes_clusters = find_extremes(duplicate_clusters, dataset, jaccard_threshold)
    for extremes in extremes_clusters:
        for element in extremes:
            extreme_dict[element["base_index"]] = element
    remove_indices = duplicate_indices - set(extreme_dict.keys())
    ds_filter = dataset.filter(lambda x, idx: idx not in remove_indices, with_indices=True)

    # update duplicate_clusters
    for cluster in duplicate_clusters:
        for element in cluster:
            element["is_extreme"] = element["base_index"] in extreme_dict
            if element["is_extreme"]:
                element["copies"] = extreme_dict[element["base_index"]]["copies"]

    print(f"Original dataset size: {len(dataset)}")
    print(f"Number of duplicate clusters: {len(duplicate_clusters)}")
    print(f"Files in duplicate cluster: {len(duplicate_indices)}")
    print(f"Unique files in duplicate cluster: {len(extreme_dict)}")
    print(f"Filtered dataset size: {len(ds_filter)}")

    return ds_filter, duplicate_clusters
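# End-to-end usage sketch (illustrative; the dataset must expose the "content",
# "repo_name" and "path" columns assumed by _compute_min_hash above):
#
#   from datasets import load_dataset
#   ds = load_dataset("codeparrot/codeparrot-clean-valid", split="train")
#   ds_dedup, clusters = deduplicate_dataset(ds, jaccard_threshold=0.85)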
| 45 | 0 |
'''simple docstring'''
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def get_tfds(
    train_file: str,
    eval_file: str,
    test_file: str,
    tokenizer: PreTrainedTokenizer,
    label_column_id: int,
    max_seq_length: Optional[int] = None,
):
    files = {}

    if train_file is not None:
        files[datasets.Split.TRAIN] = [train_file]
    if eval_file is not None:
        files[datasets.Split.VALIDATION] = [eval_file]
    if test_file is not None:
        files[datasets.Split.TEST] = [test_file]

    ds = datasets.load_dataset("csv", data_files=files)
    features_name = list(ds[list(files.keys())[0]].features.keys())
    label_name = features_name.pop(label_column_id)
    label_list = list(set(ds[list(files.keys())[0]][label_name]))
    label2id = {label: i for i, label in enumerate(label_list)}
    input_names = tokenizer.model_input_names
    transformed_ds = {}

    if len(features_name) == 1:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    example[features_name[0]], truncation=True, max_length=max_seq_length, padding="max_length"
                ),
                batched=True,
            )
    elif len(features_name) == 2:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    (example[features_name[0]], example[features_name[1]]),
                    truncation=True,
                    max_length=max_seq_length,
                    padding="max_length",
                ),
                batched=True,
            )

    def gen_train():
        for ex in transformed_ds[datasets.Split.TRAIN]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_val():
        for ex in transformed_ds[datasets.Split.VALIDATION]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_test():
        for ex in transformed_ds[datasets.Split.TEST]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    train_ds = (
        tf.data.Dataset.from_generator(
            gen_train,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TRAIN in transformed_ds
        else None
    )

    if train_ds is not None:
        train_ds = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN])))

    val_ds = (
        tf.data.Dataset.from_generator(
            gen_val,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.VALIDATION in transformed_ds
        else None
    )

    if val_ds is not None:
        val_ds = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION])))

    test_ds = (
        tf.data.Dataset.from_generator(
            gen_test,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TEST in transformed_ds
        else None
    )

    if test_ds is not None:
        test_ds = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST])))

    return train_ds, val_ds, test_ds, label2id
logger = logging.getLogger(__name__)


@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    label_column_id: int = field(metadata={"help": "Which column contains the label"})
    train_file: str = field(default=None, metadata={"help": "The path of the training file"})
    dev_file: Optional[str] = field(default=None, metadata={"help": "The path of the development file"})
    test_file: Optional[str] = field(default=None, metadata={"help": "The path of the test file"})
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )


@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    use_fast: bool = field(default=False, metadata={"help": "Set this flag to use fast tokenization."})
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f'Output directory ({training_args.output_dir}) already exists and is not empty. Use'
""" --overwrite_output_dir to overcome.""" )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO , )
logger.info(
f'n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1 )}, '
f'16-bits training: {training_args.fpaa}' )
logger.info(f'Training/evaluation parameters {training_args}' )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir,
    )
    train_dataset, eval_dataset, test_ds, label2id = get_tfds(
        train_file=data_args.train_file, eval_file=data_args.dev_file, test_file=data_args.test_file, tokenizer=tokenizer, label_column_id=data_args.label_column_id, max_seq_length=data_args.max_seq_length,
    )
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path, num_labels=len(label2id), label2id=label2id, id2label={id: label for label, id in label2id.items()}, finetuning_task="text-classification", cache_dir=model_args.cache_dir,
    )

    with training_args.strategy.scope():
        model = TFAutoModelForSequenceClassification.from_pretrained(
            model_args.model_name_or_path, from_pt=bool(".bin" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir,
        )

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": (preds == p.label_ids).mean()}

    # Initialize our Trainer
    trainer = TFTrainer(
        model=model, args=training_args, train_dataset=train_dataset, eval_dataset=eval_dataset, compute_metrics=compute_metrics,
    )
# Training
if training_args.do_train:
trainer.train()
trainer.save_model()
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")

        with open(output_eval_file, "w") as writer:
            logger.info("***** Eval results *****")
            for key, value in result.items():
                logger.info(f" {key} = {value}")
                writer.write(f"{key} = {value}\n")

            results.update(result)
return results
if __name__ == "__main__":
    main()
| 369 |
'''simple docstring'''
def speed_of_sound_in_a_fluid(density: float, bulk_modulus: float) -> float:
    """
    Calculate the speed of sound in a fluid from its density and bulk modulus,
    via the Newton-Laplace formula: speed = sqrt(bulk_modulus / density).
    """
    if density <= 0:
        raise ValueError("Impossible fluid density")
    if bulk_modulus <= 0:
        raise ValueError("Impossible bulk modulus")

    return (bulk_modulus / density) ** 0.5
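# Worked example (assumed fluid properties): for water at roughly 20 degrees C,
# bulk_modulus is about 2.15e9 Pa and density about 998 kg/m^3, so
# speed_of_sound_in_a_fluid(density=998, bulk_modulus=2.15e9) gives
# (2.15e9 / 998) ** 0.5, roughly 1467.8 m/s.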
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 135 | 0 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = {
'facebook/xlm-roberta-xl': 'https://huggingface.co/facebook/xlm-roberta-xl/resolve/main/config.json',
'facebook/xlm-roberta-xxl': 'https://huggingface.co/facebook/xlm-roberta-xxl/resolve/main/config.json',
# See all XLM-RoBERTa-XL models at https://huggingface.co/models?filter=xlm-roberta-xl
}
class XLMRobertaXLConfig(PretrainedConfig):
    model_type = "xlm-roberta-xl"

    def __init__(
        self,
        vocab_size=250880,
        hidden_size=2560,
        num_hidden_layers=36,
        num_attention_heads=32,
        intermediate_size=10240,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=514,
        type_vocab_size=1,
        initializer_range=0.02,
        layer_norm_eps=1e-05,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class XLMRobertaXLOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
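# Illustrative usage (not part of the module): the defaults above mirror the
# facebook/xlm-roberta-xl checkpoint, so `XLMRobertaXLConfig()` yields a config
# with hidden_size 2560 and 36 hidden layers; overrides are passed as keyword
# arguments, e.g. `XLMRobertaXLConfig(num_hidden_layers=12)` for a smaller model.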
| 196 |
from collections import UserDict
from typing import Union
import numpy as np
import requests
from ..utils import (
add_end_docstrings,
logging,
)
from .audio_classification import ffmpeg_read
from .base import PIPELINE_INIT_ARGS, Pipeline
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotAudioClassificationPipeline(Pipeline):
    """
    Zero-shot audio classification pipeline: predicts the class of an audio clip
    given the audio and a set of `candidate_labels`.
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        if self.framework != "pt":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")
        # No specific FOR_XXX available yet

    def __call__(self, audios, **kwargs):
        return super().__call__(audios, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]

        return preprocess_params, {}, {}

    def preprocess(self, audio, candidate_labels=None, hypothesis_template="This is a sound of {}."):
        if isinstance(audio, str):
            if audio.startswith("http://") or audio.startswith("https://"):
                # We need to actually check for a real protocol, otherwise it's impossible to use a local file
                # like http_huggingface_co.png
                audio = requests.get(audio).content
            else:
                with open(audio, "rb") as f:
                    audio = f.read()

        if isinstance(audio, bytes):
            audio = ffmpeg_read(audio, self.feature_extractor.sampling_rate)

        if not isinstance(audio, np.ndarray):
            raise ValueError("We expect a numpy ndarray as input")
        if len(audio.shape) != 1:
            raise ValueError("We expect a single channel audio input for ZeroShotAudioClassificationPipeline")

        inputs = self.feature_extractor(
            [audio], sampling_rate=self.feature_extractor.sampling_rate, return_tensors="pt"
        )
        inputs["candidate_labels"] = candidate_labels
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs["text_inputs"] = [text_inputs]
        return inputs

    def _forward(self, model_inputs):
        candidate_labels = model_inputs.pop("candidate_labels")
        text_inputs = model_inputs.pop("text_inputs")
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]

        outputs = self.model(**text_inputs, **model_inputs)

        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_audio,
        }
        return model_outputs

    def postprocess(self, model_outputs):
        candidate_labels = model_outputs.pop("candidate_labels")
        logits = model_outputs["logits"][0]

        if self.framework == "pt":
            probs = logits.softmax(dim=0)
            scores = probs.tolist()
        else:
            raise ValueError("`tf` framework not supported.")

        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
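# Example usage (illustrative; the CLAP checkpoint name is an assumption):
#
#   from transformers import pipeline
#   classifier = pipeline(task="zero-shot-audio-classification", model="laion/clap-htsat-unfused")
#   classifier("audio.wav", candidate_labels=["Sound of a dog", "Sound of vacuum cleaner"])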
| 129 | 0 |
"""simple docstring"""
def solution(n: int = 1000) -> int:
    return sum(2 * a * ((a - 1) // 2) for a in range(3, n + 1))


if __name__ == "__main__":
    print(solution())
| 128 |
"""simple docstring"""
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import SPIECE_UNDERLINE, is_sentencepiece_available
from transformers.models.speech_to_text import SpeechaTextTokenizer
from transformers.models.speech_to_text.tokenization_speech_to_text import VOCAB_FILES_NAMES, save_json
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_SP = get_tests_dir("fixtures/test_sentencepiece.model")

if is_sentencepiece_available():
    import sentencepiece as sp


FR_CODE = 5
ES_CODE = 10
@require_sentencepiece
@require_tokenizers
class _a ( _lowerCAmelCase , unittest.TestCase ):
UpperCamelCase = SpeechaTextTokenizer
UpperCamelCase = False
UpperCamelCase = True
def snake_case ( self : Tuple ) -> Optional[Any]:
'''simple docstring'''
super().setUp()
_UpperCamelCase : Dict = sp.SentencePieceProcessor()
spm_model.Load(lowerCAmelCase__ )
_UpperCamelCase : Union[str, Any] = ['''<s>''', '''<pad>''', '''</s>''', '''<unk>''']
vocab += [spm_model.IdToPiece(id_ ) for id_ in range(len(lowerCAmelCase__ ) )]
_UpperCamelCase : Tuple = dict(zip(lowerCAmelCase__, range(len(lowerCAmelCase__ ) ) ) )
_UpperCamelCase : Dict = Path(self.tmpdirname )
save_json(lowerCAmelCase__, save_dir / VOCAB_FILES_NAMES['''vocab_file'''] )
if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
copyfile(lowerCAmelCase__, save_dir / VOCAB_FILES_NAMES['''spm_file'''] )
_UpperCamelCase : List[str] = SpeechaTextTokenizer.from_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname )
def snake_case ( self : str ) -> str:
'''simple docstring'''
_UpperCamelCase : List[str] = '''<pad>'''
_UpperCamelCase : List[Any] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCAmelCase__ ), lowerCAmelCase__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCAmelCase__ ), lowerCAmelCase__ )
def snake_case ( self : Any ) -> Optional[int]:
'''simple docstring'''
_UpperCamelCase : int = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0], '''<s>''' )
self.assertEqual(vocab_keys[1], '''<pad>''' )
self.assertEqual(vocab_keys[-1], '''j''' )
self.assertEqual(len(lowerCAmelCase__ ), 1_0_0_1 )
def snake_case ( self : Any ) -> Dict:
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size, 1_0_0_1 )
def snake_case ( self : List[Any] ) -> Tuple:
'''simple docstring'''
_UpperCamelCase : Optional[int] = SpeechaTextTokenizer.from_pretrained(self.tmpdirname )
_UpperCamelCase : List[str] = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(lowerCAmelCase__, ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCAmelCase__ ), [2_8_9, 5_0, 1_4, 1_7_4, 3_8_6], )
_UpperCamelCase : List[str] = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
lowerCAmelCase__, [SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''9''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''é''', '''.'''], )
_UpperCamelCase : Optional[Any] = tokenizer.convert_tokens_to_ids(lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__, [1_2, 2_5, 8_8, 5_9, 2_8, 2_3, 1_1, 4, 6_0_6, 3_5_1, 3_5_1, 3_5_1, 7, 1_6, 7_0, 5_0, 7_6, 8_4, 1_0, 4, 8] )
_UpperCamelCase : Union[str, Any] = tokenizer.convert_ids_to_tokens(lowerCAmelCase__ )
self.assertListEqual(
lowerCAmelCase__, [SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''<unk>''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''<unk>''', '''.'''], )
@slow
def snake_case ( self : List[str] ) -> Union[str, Any]:
'''simple docstring'''
_UpperCamelCase : Any = {'''input_ids''': [[3_7_9_1, 7_9_7, 3_1, 1_1, 6_4, 7_9_7, 3_1, 2_4_2_9, 4_3_3, 1_2, 1_1_7_6, 1_2, 2_0, 7_8_6, 9_1_5, 1_4_2, 2_4_1_3, 2_4_0, 3_7, 3_2_3_8, 7_9_7, 3_1, 1_1, 3_5, 9_3, 9_1_5, 1_4_2, 2_4_1_3, 2_4_0, 3_7, 5_5_4_0, 5_6_7, 1_2_7_6, 9_3, 3_7, 6_1_0, 4_0, 6_2, 4_5_5, 6_5_7, 1_0_4_2, 1_2_3, 7_8_0, 1_7_7, 3_7, 3_0_9, 2_4_1, 1_2_9_8, 5_1_4, 2_0, 2_9_2, 2_7_3_7, 1_1_4, 2_4_6_9, 2_4_1, 8_5, 6_4, 3_0_2, 5_4_8, 5_2_8, 4_2_3, 4, 5_0_9, 4_0_6, 4_2_3, 3_7, 6_0_1, 4, 7_7_7, 3_0_2, 5_4_8, 5_2_8, 4_2_3, 2_8_4, 4, 3_3_8_8, 5_1_1, 4_5_9, 4, 3_5_5_5, 4_0, 3_2_1, 3_0_2, 7_0_5, 4, 3_3_8_8, 5_1_1, 5_8_3, 3_2_6, 5, 5, 5, 6_2, 3_3_1_0, 5_6_0, 1_7_7, 2_6_8_0, 2_1_7, 1_5_0_8, 3_2, 3_1, 8_5_3, 4_1_8, 6_4, 5_8_3, 5_1_1, 1_6_0_5, 6_2, 3_5, 9_3, 5_6_0, 1_7_7, 2_6_8_0, 2_1_7, 1_5_0_8, 1_5_2_1, 6_4, 5_8_3, 5_1_1, 5_1_9, 6_2, 2_0, 1_5_1_5, 7_6_4, 2_0, 1_4_9, 2_6_1, 5_6_2_5, 7_9_7_2, 2_0, 5_5_4_0, 5_6_7, 1_2_7_6, 9_3, 3_9_2_5, 1_6_7_5, 1_1, 1_5, 8_0_2, 7_9_7_2, 5_7_6, 2_1_7, 1_5_0_8, 1_1, 3_5, 9_3, 1_2_5_3, 2_4_4_1, 1_5, 2_8_9, 6_5_2, 3_1, 4_1_6, 3_2_1, 3_8_4_2, 1_1_5, 4_0, 9_1_1, 8, 4_7_6, 6_1_9, 4, 3_8_0, 1_4_2, 4_2_3, 3_3_5, 2_4_0, 3_5, 9_3, 2_6_4, 8, 1_1, 3_3_5, 5_6_9, 4_2_0, 1_6_3, 5, 2], [2_6_0, 5_4_8, 5_2_8, 4_2_3, 2_0, 4_5_1, 2_0, 2_6_8_1, 1_1_5_3, 3_4_3_4, 2_0, 5_5_4_0, 3_7, 5_6_7, 1_2_6, 1_2_5_3, 2_4_4_1, 3_3_7_6, 4_4_9, 2_1_0, 4_3_1, 1_5_6_3, 1_7_7, 7_6_7, 5_5_4_0, 1_1, 1_2_0_3, 4_7_2, 1_1, 2_9_5_3, 6_8_5, 2_8_5, 3_6_4, 7_0_6, 1_1_5_3, 2_0, 6_7_9_9, 2_0, 2_8_6_9, 2_0, 4_4_6_4, 1_2_6, 4_0, 2_4_2_9, 2_0, 1_0_4_0, 8_6_6, 2_6_6_4, 4_1_8, 2_0, 3_1_8, 2_0, 1_7_2_6, 1_8_6, 2_0, 2_6_5, 5_2_2, 3_5, 9_3, 2_1_9_1, 4_6_3_4, 2_0, 1_0_4_0, 1_2, 6_7_9_9, 1_5, 2_2_8, 2_3_5_6, 1_4_2, 3_1, 1_1, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [2_5_7_5, 2_6_6_6, 6_8_4, 1_5_8_2, 1_1_7_6, 1_2, 6_2_7, 1_4_9, 6_1_9, 2_0, 4_9_0_2, 5_6_3, 1_1, 2_0, 1_4_9, 2_6_1, 3_4_2_0, 2_3_5_6, 1_7_4, 1_4_2, 4_7_1_4, 1_3_1, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowerCAmelCase__, model_name='''facebook/s2t-small-mustc-en-de-st''', revision='''a14f04cf0776c02f62a8cb800cf7909e15ea23ad''', )
@require_sentencepiece
class _a ( unittest.TestCase ):
UpperCamelCase = '''valhalla/s2t_mustc_multilinguial_medium'''
UpperCamelCase = '''C\'est trop cool'''
UpperCamelCase = '''Esto es genial'''
@classmethod
def snake_case ( cls : Dict ) -> str:
'''simple docstring'''
_UpperCamelCase : SpeechaTextTokenizer = SpeechaTextTokenizer.from_pretrained(cls.checkpoint_name )
return cls
def snake_case ( self : Tuple ) -> List[str]:
'''simple docstring'''
self.assertEqual(self.tokenizer.lang_code_to_id['''pt'''], 4 )
self.assertEqual(self.tokenizer.lang_code_to_id['''ru'''], 6 )
self.assertEqual(self.tokenizer.lang_code_to_id['''it'''], 9 )
self.assertEqual(self.tokenizer.lang_code_to_id['''de'''], 1_1 )
def snake_case ( self : List[str] ) -> Union[str, Any]:
'''simple docstring'''
self.assertEqual(self.tokenizer.vocab_size, 1_0_0_0_0 )
def snake_case ( self : Any ) -> Union[str, Any]:
'''simple docstring'''
self.assertIn(lowerCAmelCase__, self.tokenizer.all_special_ids )
_UpperCamelCase : List[Any] = [ES_CODE, 4, 1_6_0_1, 4_7, 7_6_4_7, 2]
_UpperCamelCase : Optional[Any] = self.tokenizer.decode(lowerCAmelCase__, skip_special_tokens=lowerCAmelCase__ )
_UpperCamelCase : Optional[int] = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=lowerCAmelCase__ )
self.assertEqual(lowerCAmelCase__, lowerCAmelCase__ )
self.assertNotIn(self.tokenizer.eos_token, lowerCAmelCase__ )
def snake_case ( self : List[str] ) -> List[str]:
'''simple docstring'''
_UpperCamelCase : Dict = '''fr'''
_UpperCamelCase : int = self.tokenizer(self.french_text ).input_ids
self.assertEqual(encoded[0], lowerCAmelCase__ )
self.assertEqual(encoded[-1], self.tokenizer.eos_token_id )
def snake_case ( self : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
_UpperCamelCase : Dict = '''fr'''
self.assertListEqual(self.tokenizer.prefix_tokens, [FR_CODE] )
_UpperCamelCase : List[str] = '''es'''
self.assertListEqual(self.tokenizer.prefix_tokens, [ES_CODE] )
| 128 | 1 |
'''simple docstring'''
import argparse
import os
import pickle
import sys
import torch
from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl
from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils
from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
# We do this to be able to load python 2 datasets pickles
# See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918
data_utils.Vocab = data_utils.TransfoXLTokenizer
data_utils.Corpus = data_utils.TransfoXLCorpus
sys.modules["data_utils"] = data_utils
sys.modules["vocabulary"] = data_utils
def convert_transfo_xl_checkpoint_to_pytorch(
    tf_checkpoint_path, transfo_xl_config_file, pytorch_dump_folder_path, transfo_xl_dataset_file
):
if transfo_xl_dataset_file:
# Convert a pre-processed corpus (see original TensorFlow repo)
with open(_UpperCAmelCase ,"""rb""" ) as fp:
_a : int =pickle.load(_UpperCAmelCase ,encoding="""latin1""" )
# Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term)
_a : Any =pytorch_dump_folder_path + """/""" + VOCAB_FILES_NAMES["""pretrained_vocab_file"""]
print(F"Save vocabulary to {pytorch_vocab_dump_path}" )
_a : Union[str, Any] =corpus.vocab.__dict__
torch.save(_UpperCAmelCase ,_UpperCAmelCase )
_a : List[Any] =corpus.__dict__
corpus_dict_no_vocab.pop("""vocab""" ,_UpperCAmelCase )
_a : Any =pytorch_dump_folder_path + """/""" + CORPUS_NAME
print(F"Save dataset to {pytorch_dataset_dump_path}" )
torch.save(_UpperCAmelCase ,_UpperCAmelCase )
if tf_checkpoint_path:
# Convert a pre-trained TensorFlow model
_a : int =os.path.abspath(_UpperCAmelCase )
_a : List[str] =os.path.abspath(_UpperCAmelCase )
print(F"Converting Transformer XL checkpoint from {tf_path} with config at {config_path}." )
# Initialise PyTorch model
if transfo_xl_config_file == "":
_a : Dict =TransfoXLConfig()
else:
_a : Any =TransfoXLConfig.from_json_file(_UpperCAmelCase )
print(F"Building PyTorch model from configuration: {config}" )
_a : Tuple =TransfoXLLMHeadModel(_UpperCAmelCase )
_a : Tuple =load_tf_weights_in_transfo_xl(_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase )
# Save pytorch-model
_a : str =os.path.join(_UpperCAmelCase ,_UpperCAmelCase )
_a : Optional[Any] =os.path.join(_UpperCAmelCase ,_UpperCAmelCase )
print(F"Save PyTorch model to {os.path.abspath(_UpperCAmelCase )}" )
torch.save(model.state_dict() ,_UpperCAmelCase )
print(F"Save configuration file to {os.path.abspath(_UpperCAmelCase )}" )
with open(_UpperCAmelCase ,"""w""" ,encoding="""utf-8""" ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
A__: Any = argparse.ArgumentParser()
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
required=True,
help='''Path to the folder to store the PyTorch model or dataset/vocab.''',
)
parser.add_argument(
'''--tf_checkpoint_path''',
default='''''',
type=str,
help='''An optional path to a TensorFlow checkpoint path to be converted.''',
)
parser.add_argument(
'''--transfo_xl_config_file''',
default='''''',
type=str,
help=(
'''An optional config json file corresponding to the pre-trained BERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--transfo_xl_dataset_file''',
default='''''',
type=str,
help='''An optional dataset file to be converted in a vocabulary.''',
)
A__: List[str] = parser.parse_args()
convert_transfo_xl_checkpoint_to_pytorch(
args.tf_checkpoint_path,
args.transfo_xl_config_file,
args.pytorch_dump_folder_path,
args.transfo_xl_dataset_file,
)
| 276 |
"""simple docstring"""
import sys
from collections import defaultdict
class Heap:
    def __init__(self):
        self.node_position = []

    def get_position(self, vertex):
        return self.node_position[vertex]

    def set_position(self, vertex, pos):
        self.node_position[vertex] = pos

    def top_to_bottom(self, heap, start, size, positions):
        if start > size // 2 - 1:
            return
        else:
            if 2 * start + 2 >= size:
                smallest_child = 2 * start + 1
            else:
                if heap[2 * start + 1] < heap[2 * start + 2]:
                    smallest_child = 2 * start + 1
                else:
                    smallest_child = 2 * start + 2
            if heap[smallest_child] < heap[start]:
                temp, temp1 = heap[smallest_child], positions[smallest_child]
                heap[smallest_child], positions[smallest_child] = (
                    heap[start],
                    positions[start],
                )
                heap[start], positions[start] = temp, temp1

                temp = self.get_position(positions[smallest_child])
                self.set_position(
                    positions[smallest_child], self.get_position(positions[start])
                )
                self.set_position(positions[start], temp)

                self.top_to_bottom(heap, smallest_child, size, positions)

    # Update function if value of any node in min-heap decreases
    def bottom_to_top(self, val, index, heap, position):
        temp = position[index]

        while index != 0:
            parent = int((index - 2) / 2) if index % 2 == 0 else int((index - 1) / 2)

            if val < heap[parent]:
                heap[index] = heap[parent]
                position[index] = position[parent]
                self.set_position(position[parent], index)
            else:
                heap[index] = val
                position[index] = temp
                self.set_position(temp, index)
                break
            index = parent
        else:
            heap[0] = val
            position[0] = temp
            self.set_position(temp, 0)

    def heapify(self, heap, positions):
        start = len(heap) // 2 - 1
        for i in range(start, -1, -1):
            self.top_to_bottom(heap, i, len(heap), positions)

    def delete_minimum(self, heap, positions):
        temp = positions[0]
        heap[0] = sys.maxsize
        self.top_to_bottom(heap, 0, len(heap), positions)
        return temp
def prisms_algorithm(adjacency_list):
    heap = Heap()

    visited = [0] * len(adjacency_list)
    nbr_tv = [-1] * len(adjacency_list)  # Neighboring Tree Vertex of selected vertex
    # Minimum Distance of explored vertex with neighboring vertex of partial tree
    # formed in graph
    distance_tv = []  # Heap of Distance of vertices from their neighboring vertex
    positions = []

    for vertex in range(len(adjacency_list)):
        distance_tv.append(sys.maxsize)
        positions.append(vertex)
        heap.node_position.append(vertex)

    tree_edges = []
    visited[0] = 1
    distance_tv[0] = sys.maxsize
    for neighbor, distance in adjacency_list[0]:
        nbr_tv[neighbor] = 0
        distance_tv[neighbor] = distance
    heap.heapify(distance_tv, positions)

    for _ in range(1, len(adjacency_list)):
        vertex = heap.delete_minimum(distance_tv, positions)
        if visited[vertex] == 0:
            tree_edges.append((nbr_tv[vertex], vertex))
            visited[vertex] = 1
            for neighbor, distance in adjacency_list[vertex]:
                if (
                    visited[neighbor] == 0
                    and distance < distance_tv[heap.get_position(neighbor)]
                ):
                    distance_tv[heap.get_position(neighbor)] = distance
                    heap.bottom_to_top(
                        distance, heap.get_position(neighbor), distance_tv, positions
                    )
                    nbr_tv[neighbor] = vertex
    return tree_edges
if __name__ == "__main__": # pragma: no cover
# < --------- Prims Algorithm --------- >
    edges_number = int(input("Enter number of edges: ").strip())
    adjacency_list = defaultdict(list)
    for _ in range(edges_number):
        edge = [int(x) for x in input().strip().split()]

        adjacency_list[edge[0]].append([edge[1], edge[2]])
        adjacency_list[edge[1]].append([edge[0], edge[2]])
print(prisms_algorithm(adjacency_list))
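    # Worked example (illustrative): for the triangle graph entered as the three
    # edges "0 1 1", "1 2 2" and "0 2 3", the algorithm returns the MST edges
    # [(0, 1), (1, 2)] with total weight 1 + 2 = 3.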
| 69 | 0 |
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
_A = logging.get_logger(__name__)
_A = {'vocab_file': 'spiece.model'}
_A = {
'vocab_file': {
'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/spiece.model',
'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/spiece.model',
'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model',
'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model',
'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/spiece.model',
'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/spiece.model',
'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model',
'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model',
}
}
_A = {
'albert-base-v1': 512,
'albert-large-v1': 512,
'albert-xlarge-v1': 512,
'albert-xxlarge-v1': 512,
'albert-base-v2': 512,
'albert-large-v2': 512,
'albert-xlarge-v2': 512,
'albert-xxlarge-v2': 512,
}
_A = '▁'
class AlbertTokenizer(PreTrainedTokenizer):
    """Construct an ALBERT tokenizer, based on SentencePiece."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(self, vocab_file, do_lower_case=True, remove_space=True, keep_accents=False, bos_token="[CLS]", eos_token="[SEP]", unk_token="<unk>", sep_token="[SEP]", pad_token="<pad>", cls_token="[CLS]", mask_token="[MASK]", sp_model_kwargs=None, **kwargs) -> None:
        # Mask token behave like a normal word, i.e. include the space before it and
        # is included in the raw text, there should be a match in a non-normalized sentence.
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs)

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
@property
def _a ( self ) -> str:
return len(self.sp_model )
def _a ( self ) -> Dict:
__UpperCamelCase ={self.convert_ids_to_tokens(lowerCAmelCase_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ) -> Tuple:
__UpperCamelCase =self.__dict__.copy()
__UpperCamelCase =None
return state
def __setstate__( self , A_ ) -> Optional[int]:
__UpperCamelCase =d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
__UpperCamelCase ={}
__UpperCamelCase =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def _a ( self , A_ ) -> Union[str, Any]:
if self.remove_space:
__UpperCamelCase =' '.join(inputs.strip().split() )
else:
__UpperCamelCase =inputs
__UpperCamelCase =outputs.replace('``' , '\"' ).replace('\'\'' , '\"' )
if not self.keep_accents:
__UpperCamelCase =unicodedata.normalize('NFKD' , lowerCAmelCase_ )
__UpperCamelCase =''.join([c for c in outputs if not unicodedata.combining(lowerCAmelCase_ )] )
if self.do_lower_case:
__UpperCamelCase =outputs.lower()
return outputs
def _a ( self , A_ ) -> List[str]:
__UpperCamelCase =self.preprocess_text(lowerCAmelCase_ )
__UpperCamelCase =self.sp_model.encode(lowerCAmelCase_ , out_type=lowerCAmelCase_ )
__UpperCamelCase =[]
for piece in pieces:
if len(lowerCAmelCase_ ) > 1 and piece[-1] == str(',' ) and piece[-2].isdigit():
__UpperCamelCase =self.sp_model.EncodeAsPieces(piece[:-1].replace(lowerCAmelCase_ , '' ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
__UpperCamelCase =cur_pieces[1:]
else:
__UpperCamelCase =cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(lowerCAmelCase_ )
else:
new_pieces.append(lowerCAmelCase_ )
return new_pieces
def _a ( self , A_ ) -> Any:
return self.sp_model.PieceToId(lowerCAmelCase_ )
def _a ( self , A_ ) -> Optional[int]:
return self.sp_model.IdToPiece(lowerCAmelCase_ )
def _a ( self , A_ ) -> List[str]:
__UpperCamelCase =[]
__UpperCamelCase =''
__UpperCamelCase =False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(lowerCAmelCase_ ) + token
__UpperCamelCase =True
__UpperCamelCase =[]
else:
current_sub_tokens.append(lowerCAmelCase_ )
__UpperCamelCase =False
out_string += self.sp_model.decode(lowerCAmelCase_ )
return out_string.strip()
def _a ( self , A_ , A_ = None ) -> List[int]:
__UpperCamelCase =[self.sep_token_id]
__UpperCamelCase =[self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def _a ( self , A_ , A_ = None , A_ = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCAmelCase_ , token_ids_a=lowerCAmelCase_ , already_has_special_tokens=lowerCAmelCase_ )
if token_ids_a is not None:
return [1] + ([0] * len(lowerCAmelCase_ )) + [1] + ([0] * len(lowerCAmelCase_ )) + [1]
return [1] + ([0] * len(lowerCAmelCase_ )) + [1]
def _a ( self , A_ , A_ = None ) -> List[int]:
__UpperCamelCase =[self.sep_token_id]
__UpperCamelCase =[self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def _a ( self , A_ , A_ = None ) -> Tuple[str]:
if not os.path.isdir(lowerCAmelCase_ ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
__UpperCamelCase =os.path.join(
lowerCAmelCase_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCAmelCase_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , lowerCAmelCase_ )
elif not os.path.isfile(self.vocab_file ):
with open(lowerCAmelCase_ , 'wb' ) as fi:
__UpperCamelCase =self.sp_model.serialized_model_proto()
fi.write(lowerCAmelCase_ )
        return (out_vocab_file,)
| 365 |
from ....utils import logging
_A = logging.get_logger(__name__)
class MMBTConfig:
    """Configuration class to store the configuration of a Multimodal Bitransformer model."""

    def __init__(self, config, num_labels=None, modal_hidden_size=2048):
        self.__dict__ = config.__dict__
        self.modal_hidden_size = modal_hidden_size
        if num_labels:
            self.num_labels = num_labels
| 117 | 0 |
"""simple docstring"""
import warnings
from pathlib import Path
from typing import List, Tuple, Union
import fire
from torch import nn
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, PreTrainedModel
from transformers.utils import logging
logger = logging.get_logger(__name__)
def copy_layers(src_layers: nn.ModuleList, dest_layers: nn.ModuleList, layers_to_copy: List[int]) -> None:
    layers_to_copy = nn.ModuleList([src_layers[i] for i in layers_to_copy])
    assert len(dest_layers) == len(layers_to_copy), f"{len(dest_layers)} != {len(layers_to_copy)}"
    dest_layers.load_state_dict(layers_to_copy.state_dict())
LAYERS_TO_COPY = {
# maps num layers in teacher -> num_layers in student -> which teacher layers to copy.
# 12: bart, 16: pegasus, 6: marian/Helsinki-NLP
12: {
1: [0], # This says that if the teacher has 12 layers and the student has 1, copy layer 0 of the teacher
2: [0, 6],
3: [0, 6, 11],
4: [0, 4, 8, 11],
6: [0, 2, 4, 7, 9, 11],
9: [0, 1, 2, 4, 5, 7, 9, 10, 11],
12: list(range(12)),
},
16: { # maps num layers in student -> which teacher layers to copy
1: [0],
2: [0, 15],
3: [0, 8, 15],
4: [0, 5, 10, 15],
6: [0, 3, 6, 9, 12, 15],
8: [0, 2, 4, 6, 8, 10, 12, 15],
9: [0, 1, 3, 5, 7, 9, 11, 13, 15],
12: [0, 1, 2, 3, 4, 5, 6, 7, 9, 11, 13, 15],
16: list(range(16)),
},
6: {1: [0], 2: [0, 5], 3: [0, 2, 5], 4: [0, 1, 3, 5], 6: list(range(6))},
}
LAYERS_TO_SUPERVISE = {
# maps num layers in student -> which teacher layers to copy.
6: {1: [5], 2: [3, 5], 3: [1, 4, 5], 4: [1, 2, 4, 5]},
12: {1: [11], 2: [5, 11], 3: [3, 7, 11], 6: [1, 3, 5, 8, 10, 11]},
16: {1: [15], 4: [4, 9, 12, 15], 8: [1, 3, 5, 7, 9, 11, 13, 15]},
}
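# Illustrative lookups: LAYERS_TO_COPY[12][3] == [0, 6, 11], i.e. a 3-layer
# student distilled from a 12-layer teacher copies the first, middle and last
# teacher layers; LAYERS_TO_SUPERVISE[12][3] == [3, 7, 11] lists the teacher
# layers each student layer is matched against for intermediate supervision.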
def pick_layers_to_copy(n_student, n_teacher):
    try:
        val = LAYERS_TO_COPY[n_teacher][n_student]
        return val
    except KeyError:
        if n_student != n_teacher:
            warnings.warn(
                f"no hardcoded layers to copy for teacher {n_teacher} -> student {n_student}, defaulting to first"
                f" {n_student}"
            )
        return list(range(n_student))
def get_layers_to_supervise(n_student, n_teacher) -> List[int]:
    """Used for the --supervise_forward kwarg."""
    if n_student > n_teacher:
        raise ValueError(f"Cannot perform intermediate supervision for student {n_student} > teacher {n_teacher}")
    elif n_teacher == n_student:
        return list(range(n_teacher))
    elif n_student == 1:
        return [n_teacher - 1]
    else:
        return LAYERS_TO_SUPERVISE[n_teacher][n_student]
def create_student_by_copying_alternating_layers(
    teacher: Union[str, PreTrainedModel],
    save_path: Union[str, Path] = "student",
    e: Union[int, None] = None,
    d: Union[int, None] = None,
    copy_first_teacher_layers=False,
    e_layers_to_copy=None,
    d_layers_to_copy=None,
    **extra_config_kwargs,
) -> Tuple[PreTrainedModel, List[int], List[int]]:
    """Make a student by copying alternating layers from a teacher, and save it to save_path."""
    _msg = "encoder_layers and decoder_layers cannot be both None-- you would just have an identical teacher."
    assert (e is not None) or (d is not None), _msg
    if isinstance(teacher, str):
        AutoTokenizer.from_pretrained(teacher).save_pretrained(save_path)  # purely for convenience
        teacher = AutoModelForSeq2SeqLM.from_pretrained(teacher).eval()
    else:
        assert isinstance(teacher, PreTrainedModel), f"teacher must be a model or string got type {type(teacher)}"
    init_kwargs = teacher.config.to_diff_dict()

    try:
        teacher_e, teacher_d = teacher.config.encoder_layers, teacher.config.decoder_layers
        if e is None:
            e = teacher_e
        if d is None:
            d = teacher_d
        init_kwargs.update({"encoder_layers": e, "decoder_layers": d})
    except AttributeError:  # T5
        if hasattr(teacher.config, "num_encoder_layers"):
            teacher_e, teacher_d = teacher.config.num_encoder_layers, teacher.config.num_decoder_layers
        else:
            teacher_e, teacher_d = teacher.config.num_layers, teacher.config.num_decoder_layers
        if e is None:
            e = teacher_e
        if d is None:
            d = teacher_d
        if hasattr(teacher.config, "num_encoder_layers"):
            init_kwargs.update({"num_encoder_layers": e, "num_decoder_layers": d})
        else:
            init_kwargs.update({"num_layers": e, "num_decoder_layers": d})

    # Kwargs to instantiate student: teacher kwargs with updated layer numbers + **extra_config_kwargs
    init_kwargs.update(extra_config_kwargs)

    # Copy weights
    student_cfg = teacher.config_class(**init_kwargs)
    student = AutoModelForSeq2SeqLM.from_config(student_cfg)
    # Start by copying the full teacher state dict this will copy the first N teacher layers to the student.
    info = student.load_state_dict(teacher.state_dict(), strict=False)
    assert info.missing_keys == [], info.missing_keys  # every student key should have a teacher keys.

    if copy_first_teacher_layers:  # Our copying is done. We just log and save
        e_layers_to_copy, d_layers_to_copy = list(range(e)), list(range(d))
        logger.info(
            f"Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to"
            f" {save_path}"
        )
        student.save_pretrained(save_path)
        return student, e_layers_to_copy, d_layers_to_copy

    # Decide which layers of the teacher to copy. Not exactly alternating -- we try to keep first and last layer.
    if e_layers_to_copy is None:
        e_layers_to_copy = pick_layers_to_copy(e, teacher_e)
    if d_layers_to_copy is None:
        d_layers_to_copy = pick_layers_to_copy(d, teacher_d)

    try:
        if hasattr(
            teacher, "prophetnet"
        ):  # For ProphetNet, student.model.encoder.layers is called student.prophetnet.encoder.layers
            copy_layers(teacher.prophetnet.encoder.layers, student.prophetnet.encoder.layers, e_layers_to_copy)
            copy_layers(teacher.prophetnet.decoder.layers, student.prophetnet.decoder.layers, d_layers_to_copy)
        else:
            copy_layers(teacher.model.encoder.layers, student.model.encoder.layers, e_layers_to_copy)
            copy_layers(teacher.model.decoder.layers, student.model.decoder.layers, d_layers_to_copy)
    except AttributeError:  # For t5, student.model.encoder.layers is called student.encoder.block
        copy_layers(teacher.encoder.block, student.encoder.block, e_layers_to_copy)
        copy_layers(teacher.decoder.block, student.decoder.block, d_layers_to_copy)
    logger.info(
        f"Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to {save_path}"
    )
    student.config.init_metadata = {
        "teacher_type": teacher.config.model_type,
        "copied_encoder_layers": e_layers_to_copy,
        "copied_decoder_layers": d_layers_to_copy,
    }
    student.save_pretrained(save_path)
    # Save information about copying for easier reproducibility
    return student, e_layers_to_copy, d_layers_to_copy
if __name__ == "__main__":
fire.Fire(create_student_by_copying_alternating_layers)
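# Example invocation (sketch; assumes this file is saved as make_student.py).
# Build a student with 12 encoder layers and 3 decoder layers from a BART teacher:
#
#   python make_student.py facebook/bart-large-cnn ./student_dir --e 12 --d 3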
| 150 | """simple docstring"""
# Logistic Regression from scratch

# In[62]:

# In[63]:

# importing all the required libraries

import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets


def sigmoid_function(z):
    return 1 / (1 + np.exp(-z))


def cost_function(h, y):
    return (-y * np.log(h) - (1 - y) * np.log(1 - h)).mean()


def log_likelihood(x, y, weights):
    scores = np.dot(x, weights)
    return np.sum(y * scores - np.log(1 + np.exp(scores)))


# here alpha is the learning rate, x is the feature matrix and y is the target matrix
def logistic_reg(alpha, x, y, max_iterations=70000):
    theta = np.zeros(x.shape[1])

    for iterations in range(max_iterations):
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        gradient = np.dot(x.T, h - y) / y.size
        theta = theta - alpha * gradient  # updating the weights
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        j = cost_function(h, y)
        if iterations % 100 == 0:
            print(f"loss: {j} \t")  # printing the loss after every 100 iterations
    return theta


# In[68]:

if __name__ == "__main__":
    iris = datasets.load_iris()
    x = iris.data[:, :2]
    y = (iris.target != 0) * 1
    alpha = 0.1
    theta = logistic_reg(alpha, x, y, max_iterations=70000)
    print("theta: ", theta)  # printing the theta i.e our weights vector

    def predict_prob(x):
        # predicting the value of probability from the logistic regression algorithm
        return sigmoid_function(np.dot(x, theta))

    plt.figure(figsize=(10, 6))
    plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color="b", label="0")
    plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color="r", label="1")
    (x1_min, x1_max) = (x[:, 0].min(), x[:, 0].max())
    (x2_min, x2_max) = (x[:, 1].min(), x[:, 1].max())
    (xx1, xx2) = np.meshgrid(np.linspace(x1_min, x1_max), np.linspace(x2_min, x2_max))
    grid = np.c_[xx1.ravel(), xx2.ravel()]
    probs = predict_prob(grid).reshape(xx1.shape)
    plt.contour(xx1, xx2, probs, [0.5], linewidths=1, colors="black")
    plt.legend()
    plt.show()
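# Note on the update rule above: for m samples, batch gradient descent on the
# binary cross-entropy loss gives dJ/dtheta = x.T @ (sigmoid(x @ theta) - y) / m,
# which is exactly the `gradient` computed inside logistic_reg.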
| 150 | 1 |
import argparse
import json
import re
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
    MobileNetV1Config,
    MobileNetV1ForImageClassification,
    MobileNetV1ImageProcessor,
    load_tf_weights_in_mobilenet_v1,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_mobilenet_v1_config(model_name):
    config = MobileNetV1Config(layer_norm_eps=0.001)

    if "_quant" in model_name:
        raise ValueError("Quantized models are not supported.")

    matches = re.match(r"^mobilenet_v1_([^_]*)_([^_]*)$", model_name)
    if matches:
        config.depth_multiplier = float(matches[1])
        config.image_size = int(matches[2])

    # The TensorFlow version of MobileNetV1 predicts 1001 classes instead of
    # the usual 1000. The first class (index 0) is "background".
    config.num_labels = 1001
    filename = "imagenet-1k-id2label.json"
    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k) + 1: v for k, v in id2label.items()}
    id2label[0] = "background"
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
# We will verify our results on an image of cute cats
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_movilevit_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    config = get_mobilenet_v1_config(model_name)

    # Load 🤗 model
    model = MobileNetV1ForImageClassification(config).eval()

    # Load weights from TensorFlow checkpoint
    load_tf_weights_in_mobilenet_v1(model, config, checkpoint_path)

    # Check outputs on an image, prepared by MobileNetV1ImageProcessor
    image_processor = MobileNetV1ImageProcessor(
        crop_size={"width": config.image_size, "height": config.image_size},
        size={"shortest_edge": config.image_size + 32},
    )
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    logits = outputs.logits

    assert logits.shape == (1, 1001)

    if model_name == "mobilenet_v1_1.0_224":
        expected_logits = torch.tensor([-4.1739, -1.1233, 3.1205])
    elif model_name == "mobilenet_v1_0.75_192":
        expected_logits = torch.tensor([-3.9440, -2.3141, -0.3333])
    else:
        expected_logits = None

    if expected_logits is not None:
        assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing to the hub...")
        repo_id = "google/" + model_name
        image_processor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='mobilenet_v1_1.0_224',
type=str,
help='Name of the MobileNetV1 model you\'d like to convert. Should in the form \'mobilenet_v1_<depth>_<size>\'.',
)
parser.add_argument(
'--checkpoint_path', required=True, type=str, help='Path to the original TensorFlow checkpoint (.ckpt file).'
)
parser.add_argument(
'--pytorch_dump_folder_path', required=True, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
    args = parser.parse_args()
convert_movilevit_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
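# Example invocation (sketch; script and checkpoint filenames are illustrative):
#
#   python convert_original_tf_checkpoint_to_pytorch.py \
#       --model_name mobilenet_v1_1.0_224 \
#       --checkpoint_path ./mobilenet_v1_1.0_224.ckpt \
#       --pytorch_dump_folder_path ./mobilenet_v1_1.0_224_hf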
| 176 |
from io import BytesIO
from typing import List, Union
import requests
from ..utils import add_end_docstrings, is_decord_available, is_torch_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_decord_available():
import numpy as np
from decord import VideoReader
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
lowerCamelCase : int = logging.get_logger(__name__)
@add_end_docstrings(UpperCamelCase__ )
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
def __init__( self , *A , **A ) -> Optional[int]:
super().__init__(*A , **A )
requires_backends(self , """decord""" )
self.check_model_type(A )
def UpperCAmelCase ( self , A=None , A=None , A=None ) -> int:
snake_case : Any = {}
if frame_sampling_rate is not None:
snake_case : int = frame_sampling_rate
if num_frames is not None:
snake_case : Union[str, Any] = num_frames
snake_case : str = {}
if top_k is not None:
snake_case : Optional[int] = top_k
return preprocess_params, {}, postprocess_params
def __call__( self , A , **A ) -> Dict:
return super().__call__(A , **A )
def UpperCAmelCase ( self , A , A=None , A=1 ) -> Tuple:
if num_frames is None:
snake_case : Tuple = self.model.config.num_frames
if video.startswith("""http://""" ) or video.startswith("""https://""" ):
snake_case : Optional[int] = BytesIO(requests.get(A ).content )
snake_case : Optional[Any] = VideoReader(A )
videoreader.seek(0 )
snake_case : Optional[Any] = 0
snake_case : Optional[Any] = num_frames * frame_sampling_rate - 1
        snake_case : Any = np.linspace(A , A , num=A , dtype=np.int64 )
snake_case : int = videoreader.get_batch(A ).asnumpy()
snake_case : List[Any] = list(A )
snake_case : Optional[Any] = self.image_processor(A , return_tensors=self.framework )
return model_inputs
def UpperCAmelCase ( self , A ) -> List[str]:
snake_case : Dict = self.model(**A )
return model_outputs
def UpperCAmelCase ( self , A , A=5 ) -> int:
if top_k > self.model.config.num_labels:
snake_case : str = self.model.config.num_labels
if self.framework == "pt":
snake_case : List[Any] = model_outputs.logits.softmax(-1 )[0]
snake_case , snake_case : Tuple = probs.topk(A )
else:
raise ValueError(f"""Unsupported framework: {self.framework}""" )
snake_case : List[Any] = scores.tolist()
snake_case : str = ids.tolist()
return [{"score": score, "label": self.model.config.idalabel[_id]} for score, _id in zip(A , A )]
| 176 | 1 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
lowerCAmelCase : int = logging.get_logger(__name__)
lowerCAmelCase : List[str] = {
"""microsoft/resnet-50""": """https://huggingface.co/microsoft/resnet-50/blob/main/config.json""",
}
class __lowercase ( UpperCAmelCase_ , UpperCAmelCase_ ):
"""simple docstring"""
_UpperCAmelCase : Optional[int] = '''resnet'''
_UpperCAmelCase : Optional[Any] = ['''basic''', '''bottleneck''']
def __init__( self : str , lowerCAmelCase__ : Any=3 , lowerCAmelCase__ : Optional[int]=64 , lowerCAmelCase__ : Dict=[256, 512, 1024, 2048] , lowerCAmelCase__ : List[str]=[3, 4, 6, 3] , lowerCAmelCase__ : Optional[Any]="bottleneck" , lowerCAmelCase__ : int="relu" , lowerCAmelCase__ : Optional[Any]=False , lowerCAmelCase__ : Dict=None , lowerCAmelCase__ : Optional[int]=None , **lowerCAmelCase__ : List[str] , ):
super().__init__(**lowerCAmelCase__)
if layer_type not in self.layer_types:
raise ValueError(F"layer_type={layer_type} is not one of {','.join(self.layer_types)}")
SCREAMING_SNAKE_CASE_: Union[str, Any] = num_channels
SCREAMING_SNAKE_CASE_: Optional[int] = embedding_size
SCREAMING_SNAKE_CASE_: Dict = hidden_sizes
SCREAMING_SNAKE_CASE_: List[Any] = depths
SCREAMING_SNAKE_CASE_: List[Any] = layer_type
SCREAMING_SNAKE_CASE_: Any = hidden_act
SCREAMING_SNAKE_CASE_: Any = downsample_in_first_stage
SCREAMING_SNAKE_CASE_: Tuple = ["stem"] + [F"stage{idx}" for idx in range(1 , len(lowerCAmelCase__) + 1)]
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Dict = get_aligned_output_features_output_indices(
out_features=lowerCAmelCase__ , out_indices=lowerCAmelCase__ , stage_names=self.stage_names)
class __lowercase ( UpperCAmelCase_ ):
"""simple docstring"""
_UpperCAmelCase : Dict = version.parse('''1.11''' )
@property
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]):
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
])
@property
def _SCREAMING_SNAKE_CASE ( self : Optional[int]):
return 1E-3
| 13 |
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ):
'''simple docstring'''
def A ( self : List[str] ):
'''simple docstring'''
_snake_case = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(lowercase , 'tf_padding' ) )
self.parent.assertTrue(hasattr(lowercase , 'depth_multiplier' ) )
class SCREAMING_SNAKE_CASE__ :
'''simple docstring'''
def __init__( self : Dict , lowercase : List[str] , lowercase : Dict=13 , lowercase : Optional[int]=3 , lowercase : Any=32 , lowercase : Any=0.25 , lowercase : Union[str, Any]=8 , lowercase : List[Any]=8 , lowercase : List[Any]=6 , lowercase : Dict=32 , lowercase : Dict=True , lowercase : Optional[Any]=True , lowercase : Tuple=True , lowercase : Tuple="relu6" , lowercase : List[Any]=1_280 , lowercase : Optional[Any]=0.1 , lowercase : int=0.02 , lowercase : Optional[Any]=True , lowercase : List[str]=True , lowercase : List[str]=10 , lowercase : Optional[Any]=None , ):
'''simple docstring'''
_snake_case = parent
_snake_case = batch_size
_snake_case = num_channels
_snake_case = image_size
_snake_case = depth_multiplier
_snake_case = depth_divisible_by
_snake_case = min_depth
_snake_case = expand_ratio
_snake_case = tf_padding
_snake_case = output_stride
_snake_case = first_layer_is_expansion
_snake_case = finegrained_output
_snake_case = hidden_act
_snake_case = last_hidden_size if finegrained_output else int(last_hidden_size * depth_multiplier )
_snake_case = classifier_dropout_prob
_snake_case = use_labels
_snake_case = is_training
_snake_case = num_labels
_snake_case = initializer_range
_snake_case = scope
def A ( self : Union[str, Any] ):
'''simple docstring'''
_snake_case = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_snake_case = None
_snake_case = None
if self.use_labels:
_snake_case = ids_tensor([self.batch_size] , self.num_labels )
_snake_case = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
_snake_case = self.get_config()
return config, pixel_values, labels, pixel_labels
def A ( self : str ):
'''simple docstring'''
return MobileNetVaConfig(
num_channels=self.num_channels , image_size=self.image_size , depth_multiplier=self.depth_multiplier , depth_divisible_by=self.depth_divisible_by , min_depth=self.min_depth , expand_ratio=self.expand_ratio , output_stride=self.output_stride , first_layer_is_expansion=self.first_layer_is_expansion , finegrained_output=self.finegrained_output , hidden_act=self.hidden_act , tf_padding=self.tf_padding , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
def A ( self : Optional[Any] , lowercase : str , lowercase : List[str] , lowercase : str , lowercase : Dict ):
'''simple docstring'''
_snake_case = MobileNetVaModel(config=lowercase )
model.to(lowercase )
model.eval()
_snake_case = model(lowercase )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
self.parent.assertEqual(
result.pooler_output.shape , (self.batch_size, self.last_hidden_size) , )
def A ( self : List[Any] , lowercase : Optional[int] , lowercase : Optional[int] , lowercase : Optional[Any] , lowercase : List[Any] ):
'''simple docstring'''
_snake_case = self.num_labels
_snake_case = MobileNetVaForImageClassification(lowercase )
model.to(lowercase )
model.eval()
_snake_case = model(lowercase , labels=lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def A ( self : Any , lowercase : int , lowercase : Dict , lowercase : int , lowercase : List[Any] ):
'''simple docstring'''
_snake_case = self.num_labels
_snake_case = MobileNetVaForSemanticSegmentation(lowercase )
model.to(lowercase )
model.eval()
_snake_case = model(lowercase )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
_snake_case = model(lowercase , labels=lowercase )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def A ( self : str ):
'''simple docstring'''
_snake_case = self.prepare_config_and_inputs()
_snake_case , _snake_case , _snake_case , _snake_case = config_and_inputs
_snake_case = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ,UpperCAmelCase ,unittest.TestCase ):
'''simple docstring'''
_UpperCAmelCase : str = (
(MobileNetVaModel, MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation)
if is_torch_available()
else ()
)
_UpperCAmelCase : str = (
{
"feature-extraction": MobileNetVaModel,
"image-classification": MobileNetVaForImageClassification,
"image-segmentation": MobileNetVaForSemanticSegmentation,
}
if is_torch_available()
else {}
)
_UpperCAmelCase : Optional[int] = False
_UpperCAmelCase : Dict = False
_UpperCAmelCase : Dict = False
_UpperCAmelCase : Union[str, Any] = False
def A ( self : Any ):
'''simple docstring'''
_snake_case = MobileNetVaModelTester(self )
_snake_case = MobileNetVaConfigTester(self , config_class=lowercase , has_text_modality=lowercase )
def A ( self : Any ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='MobileNetV2 does not use inputs_embeds' )
def A ( self : List[str] ):
'''simple docstring'''
pass
@unittest.skip(reason='MobileNetV2 does not support input and output embeddings' )
def A ( self : int ):
'''simple docstring'''
pass
@unittest.skip(reason='MobileNetV2 does not output attentions' )
def A ( self : Any ):
'''simple docstring'''
pass
def A ( self : Optional[int] ):
'''simple docstring'''
_snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case = model_class(lowercase )
_snake_case = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_snake_case = [*signature.parameters.keys()]
_snake_case = ['pixel_values']
self.assertListEqual(arg_names[:1] , lowercase )
def A ( self : List[str] ):
'''simple docstring'''
_snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase )
def A ( self : List[Any] ):
'''simple docstring'''
def check_hidden_states_output(lowercase : List[Any] , lowercase : Union[str, Any] , lowercase : str ):
_snake_case = model_class(lowercase )
model.to(lowercase )
model.eval()
with torch.no_grad():
_snake_case = model(**self._prepare_for_class(lowercase , lowercase ) )
_snake_case = outputs.hidden_states
_snake_case = 16
self.assertEqual(len(lowercase ) , lowercase )
_snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case = True
check_hidden_states_output(lowercase , lowercase , lowercase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_snake_case = True
check_hidden_states_output(lowercase , lowercase , lowercase )
def A ( self : Tuple ):
'''simple docstring'''
_snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowercase )
def A ( self : Dict ):
'''simple docstring'''
_snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*lowercase )
@slow
def A ( self : List[Any] ):
'''simple docstring'''
for model_name in MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_snake_case = MobileNetVaModel.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
def a_ ( ) -> Union[str, Any]:
_snake_case = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def A ( self : Optional[Any] ):
'''simple docstring'''
return (
MobileNetVaImageProcessor.from_pretrained('google/mobilenet_v2_1.0_224' ) if is_vision_available() else None
)
@slow
def A ( self : List[Any] ):
'''simple docstring'''
_snake_case = MobileNetVaForImageClassification.from_pretrained('google/mobilenet_v2_1.0_224' ).to(lowercase )
_snake_case = self.default_image_processor
_snake_case = prepare_img()
_snake_case = image_processor(images=lowercase , return_tensors='pt' ).to(lowercase )
# forward pass
with torch.no_grad():
_snake_case = model(**lowercase )
# verify the logits
_snake_case = torch.Size((1, 1_001) )
self.assertEqual(outputs.logits.shape , lowercase )
_snake_case = torch.tensor([0.2445, -1.1993, 0.1905] ).to(lowercase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowercase , atol=1E-4 ) )
@slow
def A ( self : Dict ):
'''simple docstring'''
_snake_case = MobileNetVaForSemanticSegmentation.from_pretrained('google/deeplabv3_mobilenet_v2_1.0_513' )
_snake_case = model.to(lowercase )
_snake_case = MobileNetVaImageProcessor.from_pretrained('google/deeplabv3_mobilenet_v2_1.0_513' )
_snake_case = prepare_img()
_snake_case = image_processor(images=lowercase , return_tensors='pt' ).to(lowercase )
# forward pass
with torch.no_grad():
_snake_case = model(**lowercase )
_snake_case = outputs.logits
# verify the logits
_snake_case = torch.Size((1, 21, 65, 65) )
self.assertEqual(logits.shape , lowercase )
_snake_case = torch.tensor(
[
[[17.5790, 17.7581, 18.3355], [18.3257, 18.4230, 18.8973], [18.6169, 18.8650, 19.2187]],
[[-2.1595, -2.0977, -2.3741], [-2.4226, -2.3028, -2.6835], [-2.7819, -2.5991, -2.7706]],
[[4.2058, 4.8317, 4.7638], [4.4136, 5.0361, 4.9383], [4.5028, 4.9644, 4.8734]],
] , device=lowercase , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , lowercase , atol=1E-4 ) ) | 282 | 0 |
"""simple docstring"""
import math
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput
class IPNDMScheduler(SchedulerMixin, ConfigMixin):
    order = 1

    @register_to_config
    def __init__(
        self, num_train_timesteps: int = 1000, trained_betas: Optional[Union[np.ndarray, List[float]]] = None
    ):
        # set `betas`, `alphas`, `timesteps`
        self.set_timesteps(num_train_timesteps)

        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0

        # For now we only support F-PNDM, i.e. the runge-kutta method
        # For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
        # mainly at formula (9), (12), (13) and the Algorithm 2.
        self.pndm_order = 4

        # running values
        self.ets = []

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        self.num_inference_steps = num_inference_steps
        steps = torch.linspace(1, 0, num_inference_steps + 1)[:-1]
        steps = torch.cat([steps, torch.tensor([0.0])])

        if self.config.trained_betas is not None:
            self.betas = torch.tensor(self.config.trained_betas, dtype=torch.float32)
        else:
            self.betas = torch.sin(steps * math.pi / 2) ** 2

        self.alphas = (1.0 - self.betas**2) ** 0.5

        timesteps = (torch.atan2(self.betas, self.alphas) / math.pi * 2)[:-1]
        self.timesteps = timesteps.to(device)

        self.ets = []

    def step(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        return_dict: bool = True,
    ):
        if self.num_inference_steps is None:
            raise ValueError(
                "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
            )

        timestep_index = (self.timesteps == timestep).nonzero().item()
        prev_timestep_index = timestep_index + 1

        ets = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
        self.ets.append(ets)

        if len(self.ets) == 1:
            ets = self.ets[-1]
        elif len(self.ets) == 2:
            ets = (3 * self.ets[-1] - self.ets[-2]) / 2
        elif len(self.ets) == 3:
            ets = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12
        else:
            ets = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4])

        prev_sample = self._get_prev_sample(sample, timestep_index, prev_timestep_index, ets)

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)

    def scale_model_input(self, sample: torch.FloatTensor, *args, **kwargs) -> torch.FloatTensor:
        return sample

    def _get_prev_sample(self, sample, timestep_index, prev_timestep_index, ets):
        alpha = self.alphas[timestep_index]
        sigma = self.betas[timestep_index]

        next_alpha = self.alphas[prev_timestep_index]
        next_sigma = self.betas[prev_timestep_index]

        pred = (sample - sigma * ets) / max(alpha, 1e-8)
        prev_sample = next_alpha * pred + ets * next_sigma

        return prev_sample

    def __len__(self):
        return self.config.num_train_timesteps
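# Note: the step() branches above are the standard Adams-Bashforth multistep
# coefficients applied to the running `ets` buffer --
#   1-step:  e_t
#   2-step:  (3*e_t - e_{t-1}) / 2
#   3-step:  (23*e_t - 16*e_{t-1} + 5*e_{t-2}) / 12
#   4-step:  (55*e_t - 59*e_{t-1} + 37*e_{t-2} - 9*e_{t-3}) / 24
# matching Algorithm 2 of https://arxiv.org/pdf/2202.09778.pdf.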
| 357 |
"""simple docstring"""
from typing import Dict
import numpy as np
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException
if is_tf_available():
import tensorflow as tf
from ..tf_utils import stable_softmax
if is_torch_available():
import torch
logger = logging.get_logger(__name__)
@add_end_docstrings(
_lowerCAmelCase , r"\n top_k (`int`, defaults to 5):\n The number of predictions to return.\n targets (`str` or `List[str]`, *optional*):\n When passed, the model will limit the scores to the passed targets instead of looking up in the whole\n vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting\n token will be used (with a warning, and that might be slower).\n\n " , )
class _UpperCAmelCase ( _lowerCAmelCase ):
def a ( self : List[Any] , _lowercase : GenericTensor ):
if self.framework == "tf":
__UpperCAmelCase = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()
elif self.framework == "pt":
__UpperCAmelCase = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=_lowercase )
else:
raise ValueError('''Unsupported framework''' )
return masked_index
def a ( self : List[str] , _lowercase : GenericTensor ):
__UpperCAmelCase = self.get_masked_index(_lowercase )
__UpperCAmelCase = np.prod(masked_index.shape )
if numel < 1:
raise PipelineException(
'''fill-mask''' , self.model.base_model_prefix , F'''No mask_token ({self.tokenizer.mask_token}) found on the input''' , )
def a ( self : Optional[int] , _lowercase : GenericTensor ):
if isinstance(_lowercase , _lowercase ):
for model_input in model_inputs:
self._ensure_exactly_one_mask_token(model_input['''input_ids'''][0] )
else:
for input_ids in model_inputs["input_ids"]:
self._ensure_exactly_one_mask_token(_lowercase )
def a ( self : List[str] , _lowercase : Optional[int] , _lowercase : Tuple=None , **_lowercase : Tuple ):
if return_tensors is None:
__UpperCAmelCase = self.framework
__UpperCAmelCase = self.tokenizer(_lowercase , return_tensors=_lowercase )
self.ensure_exactly_one_mask_token(_lowercase )
return model_inputs
def a ( self : Optional[int] , _lowercase : Tuple ):
__UpperCAmelCase = self.model(**_lowercase )
__UpperCAmelCase = model_inputs['''input_ids''']
return model_outputs
def a ( self : Optional[int] , _lowercase : List[str] , _lowercase : Optional[Any]=5 , _lowercase : Dict=None ):
# Cap top_k if there are targets
if target_ids is not None and target_ids.shape[0] < top_k:
__UpperCAmelCase = target_ids.shape[0]
__UpperCAmelCase = model_outputs['''input_ids'''][0]
__UpperCAmelCase = model_outputs['''logits''']
if self.framework == "tf":
__UpperCAmelCase = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()[:, 0]
__UpperCAmelCase = outputs.numpy()
__UpperCAmelCase = outputs[0, masked_index, :]
__UpperCAmelCase = stable_softmax(_lowercase , axis=-1 )
if target_ids is not None:
__UpperCAmelCase = tf.gather_nd(tf.squeeze(_lowercase , 0 ) , target_ids.reshape(-1 , 1 ) )
__UpperCAmelCase = tf.expand_dims(_lowercase , 0 )
__UpperCAmelCase = tf.math.top_k(_lowercase , k=_lowercase )
__UpperCAmelCase , __UpperCAmelCase = topk.values.numpy(), topk.indices.numpy()
else:
__UpperCAmelCase = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=_lowercase ).squeeze(-1 )
# Fill mask pipeline supports only one ${mask_token} per sample
__UpperCAmelCase = outputs[0, masked_index, :]
__UpperCAmelCase = logits.softmax(dim=-1 )
if target_ids is not None:
__UpperCAmelCase = probs[..., target_ids]
__UpperCAmelCase , __UpperCAmelCase = probs.topk(_lowercase )
__UpperCAmelCase = []
__UpperCAmelCase = values.shape[0] == 1
for i, (_values, _predictions) in enumerate(zip(values.tolist() , predictions.tolist() ) ):
__UpperCAmelCase = []
for v, p in zip(_values , _predictions ):
# Copy is important since we're going to modify this array in place
__UpperCAmelCase = input_ids.numpy().copy()
if target_ids is not None:
__UpperCAmelCase = target_ids[p].tolist()
__UpperCAmelCase = p
# Filter padding out:
__UpperCAmelCase = tokens[np.where(tokens != self.tokenizer.pad_token_id )]
# Originally we skip special tokens to give readable output.
# For multi masks though, the other [MASK] would be removed otherwise
# making the output look odd, so we add them back
__UpperCAmelCase = self.tokenizer.decode(_lowercase , skip_special_tokens=_lowercase )
__UpperCAmelCase = {'''score''': v, '''token''': p, '''token_str''': self.tokenizer.decode([p] ), '''sequence''': sequence}
row.append(_lowercase )
result.append(_lowercase )
if single_mask:
return result[0]
return result
def a ( self : str , _lowercase : List[Any] , _lowercase : List[Any]=None ):
if isinstance(_lowercase , _lowercase ):
__UpperCAmelCase = [targets]
try:
__UpperCAmelCase = self.tokenizer.get_vocab()
except Exception:
__UpperCAmelCase = {}
__UpperCAmelCase = []
for target in targets:
__UpperCAmelCase = vocab.get(_lowercase , _lowercase )
if id_ is None:
__UpperCAmelCase = self.tokenizer(
_lowercase , add_special_tokens=_lowercase , return_attention_mask=_lowercase , return_token_type_ids=_lowercase , max_length=1 , truncation=_lowercase , )['''input_ids''']
if len(_lowercase ) == 0:
logger.warning(
F'''The specified target token `{target}` does not exist in the model vocabulary. '''
'''We cannot replace it with anything meaningful, ignoring it''' )
continue
__UpperCAmelCase = input_ids[0]
# XXX: If users encounter this pass
# it becomes pretty slow, so let's make sure
# The warning enables them to fix the input to
# get faster performance.
logger.warning(
F'''The specified target token `{target}` does not exist in the model vocabulary. '''
F'''Replacing with `{self.tokenizer.convert_ids_to_tokens(id_ )}`.''' )
target_ids.append(id_ )
__UpperCAmelCase = list(set(_lowercase ) )
if len(_lowercase ) == 0:
raise ValueError('''At least one target must be provided when passed.''' )
__UpperCAmelCase = np.array(_lowercase )
return target_ids
def a ( self : int , _lowercase : Dict=None , _lowercase : Optional[Any]=None ):
__UpperCAmelCase = {}
if targets is not None:
__UpperCAmelCase = self.get_target_ids(_lowercase , _lowercase )
__UpperCAmelCase = target_ids
if top_k is not None:
__UpperCAmelCase = top_k
if self.tokenizer.mask_token_id is None:
raise PipelineException(
'''fill-mask''' , self.model.base_model_prefix , '''The tokenizer does not define a `mask_token`.''' )
return {}, {}, postprocess_params
def __call__( self : Union[str, Any] , _lowercase : Optional[Any] , *_lowercase : Union[str, Any] , **_lowercase : int ):
__UpperCAmelCase = super().__call__(_lowercase , **_lowercase )
if isinstance(_lowercase , _lowercase ) and len(_lowercase ) == 1:
return outputs[0]
return outputs
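# Usage sketch (scores elided):
#
#   from transformers import pipeline
#   unmasker = pipeline("fill-mask", model="bert-base-uncased")
#   unmasker("Paris is the [MASK] of France.", top_k=2)
#   # -> [{"token_str": "capital", "sequence": "paris is the capital of france.", ...}, ...]
#   unmasker("Paris is the [MASK] of France.", targets=["capital", "heart"])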
| 86 | 0 |
"""simple docstring"""
__author__ = "Tobias Carryer"

from time import time


class LinearCongruentialGenerator:
    """
    A pseudorandom number generator.
    """

    def __init__(self, multiplier, increment, modulo, seed=int(time())):  # noqa: B008
        """
        These parameters are saved and used when next_number() is called.
        modulo is the largest number that can be generated (exclusive).
        """
        self.multiplier = multiplier
        self.increment = increment
        self.modulo = modulo
        self.seed = seed

    def next_number(self):
        """
        The smallest number that can be generated is zero.
        The largest number that can be generated is modulo - 1.
        """
        self.seed = (self.multiplier * self.seed + self.increment) % self.modulo
        return self.seed


if __name__ == "__main__":
    # Show the LCG in action.
    lcg = LinearCongruentialGenerator(1664525, 1013904223, 2 << 31)
    while True:
        print(lcg.next_number())
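# Note: 1664525 and 1013904223 are the classic "Numerical Recipes" LCG constants.
# With modulo 2 << 31 == 2**32 they satisfy the Hull-Dobell conditions
# (increment odd; multiplier - 1 divisible by 4 and by every prime factor of the
# modulus), so the generator achieves the full period of 2**32.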
| 54 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_roformer''': ['''ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''RoFormerConfig''', '''RoFormerOnnxConfig'''],
'''tokenization_roformer''': ['''RoFormerTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_roformer_fast"] = ["RoFormerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roformer"] = [
'''ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''RoFormerForCausalLM''',
'''RoFormerForMaskedLM''',
'''RoFormerForMultipleChoice''',
'''RoFormerForQuestionAnswering''',
'''RoFormerForSequenceClassification''',
'''RoFormerForTokenClassification''',
'''RoFormerLayer''',
'''RoFormerModel''',
'''RoFormerPreTrainedModel''',
'''load_tf_weights_in_roformer''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_roformer"] = [
'''TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFRoFormerForCausalLM''',
'''TFRoFormerForMaskedLM''',
'''TFRoFormerForMultipleChoice''',
'''TFRoFormerForQuestionAnswering''',
'''TFRoFormerForSequenceClassification''',
'''TFRoFormerForTokenClassification''',
'''TFRoFormerLayer''',
'''TFRoFormerModel''',
'''TFRoFormerPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_roformer"] = [
'''FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FlaxRoFormerForMaskedLM''',
'''FlaxRoFormerForMultipleChoice''',
'''FlaxRoFormerForQuestionAnswering''',
'''FlaxRoFormerForSequenceClassification''',
'''FlaxRoFormerForTokenClassification''',
'''FlaxRoFormerModel''',
'''FlaxRoFormerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
from .tokenization_roformer import RoFormerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roformer_fast import RoFormerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roformer import (
ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
RoFormerForCausalLM,
RoFormerForMaskedLM,
RoFormerForMultipleChoice,
RoFormerForQuestionAnswering,
RoFormerForSequenceClassification,
RoFormerForTokenClassification,
RoFormerLayer,
RoFormerModel,
RoFormerPreTrainedModel,
load_tf_weights_in_roformer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roformer import (
TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerLayer,
TFRoFormerModel,
TFRoFormerPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roformer import (
FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
FlaxRoFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
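# Note: this is the standard lazy-import pattern used across the library -- at
# import time only `_import_structure` (a dict of submodule -> exported names)
# is built, and _LazyModule defers the heavy framework-specific imports until
# an attribute is first accessed.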
| 54 | 1 |
from io import BytesIO
from typing import List, Union
import requests
from ..utils import add_end_docstrings, is_decord_available, is_torch_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_decord_available():
import numpy as np
from decord import VideoReader
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
__UpperCAmelCase = logging.get_logger(__name__)
@add_end_docstrings(__UpperCamelCase )
class __a ( __UpperCamelCase ):
def __init__( self : Tuple , *UpperCAmelCase : List[Any] , **UpperCAmelCase : Dict ):
super().__init__(*UpperCAmelCase , **UpperCAmelCase )
requires_backends(self , """decord""" )
self.check_model_type(UpperCAmelCase )
def A ( self : Tuple , UpperCAmelCase : Any=None , UpperCAmelCase : List[str]=None , UpperCAmelCase : Optional[int]=None ):
lowerCAmelCase_ : List[str] = {}
if frame_sampling_rate is not None:
lowerCAmelCase_ : str = frame_sampling_rate
if num_frames is not None:
lowerCAmelCase_ : Tuple = num_frames
lowerCAmelCase_ : Dict = {}
if top_k is not None:
lowerCAmelCase_ : int = top_k
return preprocess_params, {}, postprocess_params
def __call__( self : Dict , UpperCAmelCase : Union[str, List[str]] , **UpperCAmelCase : Dict ):
return super().__call__(UpperCAmelCase , **UpperCAmelCase )
def A ( self : List[str] , UpperCAmelCase : Any , UpperCAmelCase : Union[str, Any]=None , UpperCAmelCase : Tuple=1 ):
if num_frames is None:
lowerCAmelCase_ : Optional[int] = self.model.config.num_frames
if video.startswith("""http://""" ) or video.startswith("""https://""" ):
lowerCAmelCase_ : Union[str, Any] = BytesIO(requests.get(UpperCAmelCase ).content )
lowerCAmelCase_ : Any = VideoReader(UpperCAmelCase )
videoreader.seek(0 )
lowerCAmelCase_ : List[Any] = 0
lowerCAmelCase_ : Union[str, Any] = num_frames * frame_sampling_rate - 1
        lowerCAmelCase_ : Optional[int] = np.linspace(UpperCAmelCase , UpperCAmelCase , num=UpperCAmelCase , dtype=np.int64 )
lowerCAmelCase_ : Optional[Any] = videoreader.get_batch(UpperCAmelCase ).asnumpy()
lowerCAmelCase_ : List[str] = list(UpperCAmelCase )
lowerCAmelCase_ : Any = self.image_processor(UpperCAmelCase , return_tensors=self.framework )
return model_inputs
def A ( self : Dict , UpperCAmelCase : Optional[Any] ):
lowerCAmelCase_ : Tuple = self.model(**UpperCAmelCase )
return model_outputs
def A ( self : List[Any] , UpperCAmelCase : Dict , UpperCAmelCase : int=5 ):
if top_k > self.model.config.num_labels:
lowerCAmelCase_ : Optional[int] = self.model.config.num_labels
if self.framework == "pt":
lowerCAmelCase_ : Any = model_outputs.logits.softmax(-1 )[0]
lowerCAmelCase_ , lowerCAmelCase_ : Union[str, Any] = probs.topk(UpperCAmelCase )
else:
raise ValueError(F'Unsupported framework: {self.framework}' )
lowerCAmelCase_ : List[Any] = scores.tolist()
lowerCAmelCase_ : List[Any] = ids.tolist()
return [{"score": score, "label": self.model.config.idalabel[_id]} for score, _id in zip(UpperCAmelCase , UpperCAmelCase )]
| 28 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
'uw-madison/mra-base-512-4': 'https://huggingface.co/uw-madison/mra-base-512-4/resolve/main/config.json',
}
class __a ( __UpperCamelCase ):
__snake_case : Optional[Any] = """mra"""
def __init__( self : List[str] , UpperCAmelCase : Tuple=5_02_65 , UpperCAmelCase : str=7_68 , UpperCAmelCase : int=12 , UpperCAmelCase : Dict=12 , UpperCAmelCase : Tuple=30_72 , UpperCAmelCase : str="gelu" , UpperCAmelCase : Optional[Any]=0.1 , UpperCAmelCase : Optional[int]=0.1 , UpperCAmelCase : List[str]=5_12 , UpperCAmelCase : Optional[Any]=1 , UpperCAmelCase : Tuple=0.02 , UpperCAmelCase : int=1e-5 , UpperCAmelCase : Optional[int]="absolute" , UpperCAmelCase : Optional[Any]=4 , UpperCAmelCase : Any="full" , UpperCAmelCase : Optional[Any]=0 , UpperCAmelCase : List[str]=0 , UpperCAmelCase : Any=1 , UpperCAmelCase : int=0 , UpperCAmelCase : int=2 , **UpperCAmelCase : Tuple , ):
super().__init__(pad_token_id=UpperCAmelCase , bos_token_id=UpperCAmelCase , eos_token_id=UpperCAmelCase , **UpperCAmelCase )
lowerCAmelCase_ : Union[str, Any] = vocab_size
lowerCAmelCase_ : Optional[int] = max_position_embeddings
lowerCAmelCase_ : Any = hidden_size
lowerCAmelCase_ : List[Any] = num_hidden_layers
lowerCAmelCase_ : Tuple = num_attention_heads
lowerCAmelCase_ : List[Any] = intermediate_size
lowerCAmelCase_ : Dict = hidden_act
lowerCAmelCase_ : Optional[Any] = hidden_dropout_prob
lowerCAmelCase_ : Any = attention_probs_dropout_prob
lowerCAmelCase_ : str = initializer_range
lowerCAmelCase_ : str = type_vocab_size
lowerCAmelCase_ : str = layer_norm_eps
lowerCAmelCase_ : Optional[int] = position_embedding_type
lowerCAmelCase_ : Any = block_per_row
lowerCAmelCase_ : int = approx_mode
lowerCAmelCase_ : Union[str, Any] = initial_prior_first_n_blocks
lowerCAmelCase_ : Dict = initial_prior_diagonal_n_blocks
| 28 | 1 |
"""simple docstring"""
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
UpperCAmelCase_ : Optional[Any] = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, """utils"""))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated.
UpperCAmelCase_ : List[Any] = """ def __init__(self, config):
super().__init__()
self.transform = BertPredictionHeadTransform(config)
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
self.bias = nn.Parameter(torch.zeros(config.vocab_size))
# Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
self.decoder.bias = self.bias
def forward(self, hidden_states):
hidden_states = self.transform(hidden_states)
hidden_states = self.decoder(hidden_states)
return hidden_states
"""
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
def _SCREAMING_SNAKE_CASE ( self : int):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : List[str] = tempfile.mkdtemp()
os.makedirs(os.path.join(self.transformer_dir , '''models/bert/'''))
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.transformer_dir
shutil.copy(
os.path.join(lowercase_ , '''src/transformers/models/bert/modeling_bert.py''') , os.path.join(self.transformer_dir , '''models/bert/modeling_bert.py''') , )
def _SCREAMING_SNAKE_CASE ( self : Dict):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : List[Any] = '''src/transformers'''
shutil.rmtree(self.transformer_dir)
def _SCREAMING_SNAKE_CASE ( self : Dict , lowercase_ : Dict , lowercase_ : str , lowercase_ : int , lowercase_ : Tuple=None):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Union[str, Any] = comment + F'\nclass {class_name}(nn.Module):\n' + class_code
if overwrite_result is not None:
SCREAMING_SNAKE_CASE_ : List[Any] = comment + F'\nclass {class_name}(nn.Module):\n' + overwrite_result
SCREAMING_SNAKE_CASE_ : List[Any] = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=119)
SCREAMING_SNAKE_CASE_ : Optional[int] = black.format_str(lowercase_ , mode=lowercase_)
SCREAMING_SNAKE_CASE_ : Any = os.path.join(self.transformer_dir , '''new_code.py''')
with open(lowercase_ , '''w''' , newline='''\n''') as f:
f.write(lowercase_)
if overwrite_result is None:
self.assertTrue(len(check_copies.is_copy_consistent(lowercase_)) == 0)
else:
check_copies.is_copy_consistent(f.name , overwrite=lowercase_)
with open(lowercase_ , '''r''') as f:
self.assertTrue(f.read() , lowercase_)
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : str = check_copies.find_code_in_transformers('''models.bert.modeling_bert.BertLMPredictionHead''')
self.assertEqual(lowercase_ , lowercase_)
def _SCREAMING_SNAKE_CASE ( self : Tuple):
'''simple docstring'''
self.check_copy_consistency(
'''# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead''' , '''BertLMPredictionHead''' , REFERENCE_CODE + '''\n''' , )
# With no empty line at the end
self.check_copy_consistency(
'''# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead''' , '''BertLMPredictionHead''' , lowercase_ , )
# Copy consistency with rename
self.check_copy_consistency(
'''# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel''' , '''TestModelLMPredictionHead''' , re.sub('''Bert''' , '''TestModel''' , lowercase_) , )
# Copy consistency with a really long name
SCREAMING_SNAKE_CASE_ : Optional[Any] = '''TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason'''
self.check_copy_consistency(
F'# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}' , F'{long_class_name}LMPredictionHead' , re.sub('''Bert''' , lowercase_ , lowercase_) , )
# Copy consistency with overwrite
self.check_copy_consistency(
'''# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel''' , '''TestModelLMPredictionHead''' , lowercase_ , overwrite_result=re.sub('''Bert''' , '''TestModel''' , lowercase_) , )
def _SCREAMING_SNAKE_CASE ( self : List[Any]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Any = check_copies.LOCALIZED_READMES['''README_zh-hans.md''']
SCREAMING_SNAKE_CASE_ : Any = (
'''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the'''
''' Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for'''
''' Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong'''
''' Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1.'''
''' **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace),'''
''' released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and'''
''' lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same'''
''' method has been applied to compress GPT2 into'''
''' [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into'''
''' [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),'''
''' Multilingual BERT into'''
''' [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German'''
''' version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**'''
''' (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders'''
''' as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang'''
''' Luong, Quoc V. Le, Christopher D. Manning.'''
)
SCREAMING_SNAKE_CASE_ : Tuple = (
'''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'''
''' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'''
''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'''
''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'''
)
SCREAMING_SNAKE_CASE_ : Optional[int] = (
'''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'''
''' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'''
''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'''
''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1.'''
''' **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文'''
''' [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and'''
''' lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same'''
''' method has been applied to compress GPT2 into'''
''' [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into'''
''' [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),'''
''' Multilingual BERT into'''
''' [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German'''
''' version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自'''
''' Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather'''
''' than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le,'''
''' Christopher D. Manning 发布。\n'''
)
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[str] = check_copies.convert_to_localized_md(
lowercase_ , lowercase_ , localized_readme['''format_model_list'''])
self.assertFalse(lowercase_)
self.assertEqual(lowercase_ , lowercase_)
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Optional[int] = check_copies.convert_to_localized_md(
lowercase_ , lowercase_ , localized_readme['''format_model_list'''])
# Check whether the number of models is equal to README.md after conversion.
self.assertTrue(lowercase_)
SCREAMING_SNAKE_CASE_ : Dict = (
'''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the'''
''' Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for'''
''' Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong'''
''' Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.'''
)
SCREAMING_SNAKE_CASE_ : Union[str, Any] = (
'''1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and'''
''' the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'''
''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'''
''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'''
)
SCREAMING_SNAKE_CASE_ : Optional[Any] = (
'''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'''
''' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'''
''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'''
''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'''
)
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[str] = check_copies.convert_to_localized_md(
lowercase_ , lowercase_ , localized_readme['''format_model_list'''])
# Check if the model link is synchronized.
self.assertEqual(lowercase_ , lowercase_)
| 91 |
from __future__ import annotations

import matplotlib.pyplot as plt  # type: ignore
import numpy

# initial triangle of Koch snowflake
VECTOR_1 = numpy.array([0, 0])
VECTOR_2 = numpy.array([0.5, 0.8660254])
VECTOR_3 = numpy.array([1, 0])
INITIAL_VECTORS = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]


def iterate(initial_vectors: list[numpy.ndarray], steps: int) -> list[numpy.ndarray]:
    """Go through the number of iterations determined by the argument "steps"."""
    vectors = initial_vectors
    for _ in range(steps):
        vectors = iteration_step(vectors)
    return vectors


def iteration_step(vectors: list[numpy.ndarray]) -> list[numpy.ndarray]:
    """Divide each line between two adjacent vectors into 4 segments by adding
    3 additional vectors in-between, with the middle one rotated out by 60 degrees."""
    new_vectors = []
    for i, start_vector in enumerate(vectors[:-1]):
        end_vector = vectors[i + 1]
        new_vectors.append(start_vector)
        difference_vector = end_vector - start_vector
        new_vectors.append(start_vector + difference_vector / 3)
        new_vectors.append(
            start_vector + difference_vector / 3 + rotate(difference_vector / 3, 60)
        )
        new_vectors.append(start_vector + difference_vector * 2 / 3)
    new_vectors.append(vectors[-1])
    return new_vectors


def rotate(vector: numpy.ndarray, angle_in_degrees: float) -> numpy.ndarray:
    """Standard rotation of a 2D vector with a rotation matrix."""
    theta = numpy.radians(angle_in_degrees)
    c, s = numpy.cos(theta), numpy.sin(theta)
    rotation_matrix = numpy.array(((c, -s), (s, c)))
    return numpy.dot(rotation_matrix, vector)


def plot(vectors: list[numpy.ndarray]) -> None:
    """Utility function to plot the vectors using matplotlib.pyplot."""
    # avoid stretched display of graph
    axes = plt.gca()
    axes.set_aspect("equal")

    # matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
    # y-coordinates as inputs, which are constructed from the vector-list using
    # zip()
    x_coordinates, y_coordinates = zip(*vectors)
    plt.plot(x_coordinates, y_coordinates)
    plt.show()


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    processed_vectors = iterate(INITIAL_VECTORS, 5)
    plot(processed_vectors)
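# Note: each iteration replaces every segment with four segments of one third the
# length, so after n steps the snowflake boundary has 3 * 4**n segments and its
# total length has grown by a factor of (4 / 3)**n.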
| 303 | 0 |
def solution(n: int = 1000) -> int:
    """
    Return the largest product a * b * c of any Pythagorean triplet
    (a, b, c) with a + b + c == n.
    """
    product = -1
    candidate = 0
    for a in range(1, n // 3):
        # Solving the two equations a**2+b**2=c**2 and a+b+c=N eliminating c
        b = (n * n - 2 * a * n) // (2 * n - 2 * a)
        c = n - a - b
        if c * c == (a * a + b * b):
            candidate = a * b * c
            if candidate >= product:
                product = candidate
    return product
if __name__ == "__main__":
print(F"""{solution() = }""")
def solution(n: int = 1000) -> int:
    """Return the sum of r_max = 2 * a * ((a - 1) // 2) for 3 <= a <= n."""
    return sum(2 * a * ((a - 1) // 2) for a in range(3, n + 1))
if __name__ == "__main__":
print(solution())
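# Illustrative brute-force check of the closed form above, assuming this solves
# the "square remainders" problem (maximum of ((a-1)**k + (a+1)**k) mod a**2
# over k): the remainder sequence is periodic in k, so probing k up to 2*a
# suffices for a cross-check on small values of a.
def _max_remainder_brute_force(a: int) -> int:
    return max(((a - 1) ** k + (a + 1) ** k) % (a * a) for k in range(1, 2 * a + 1))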
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
_import_structure = {
    '''configuration_trocr''': ['''TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TrOCRConfig'''],
    '''processing_trocr''': ['''TrOCRProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_trocr'''] = [
        '''TROCR_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''TrOCRForCausalLM''',
        '''TrOCRPreTrainedModel''',
    ]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
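# Illustrative usage sketch (assumes Hub access to the public
# "microsoft/trocr-base-handwritten" checkpoint; not part of this module):
#
#     from transformers import TrOCRProcessor, VisionEncoderDecoderModel
#
#     processor = TrOCRProcessor.from_pretrained("microsoft/trocr-base-handwritten")
#     model = VisionEncoderDecoderModel.from_pretrained("microsoft/trocr-base-handwritten")
#     pixel_values = processor(images=image, return_tensors="pt").pixel_values
#     generated_ids = model.generate(pixel_values)
#     text = processor.batch_decode(generated_ids, skip_special_tokens=True)[0]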
"""simple docstring"""
import pytest
from datasets.splits import SplitDict, SplitInfo
from datasets.utils.py_utils import asdict
@pytest.mark.parametrize(
    "split_dict",
    [
        SplitDict(),
        SplitDict({"train": SplitInfo(name="train", num_bytes=13_37, num_examples=42, dataset_name="my_dataset")}),
        SplitDict({"train": SplitInfo(name="train", num_bytes=13_37, num_examples=42)}),
        SplitDict({"train": SplitInfo()}),
    ],
)
def test_split_dict_to_yaml_list(split_dict: SplitDict):
    split_dict_yaml_list = split_dict._to_yaml_list()
    assert len(split_dict_yaml_list) == len(split_dict)
    reloaded = SplitDict._from_yaml_list(split_dict_yaml_list)
    for split_name, split_info in split_dict.items():
        # dataset_name field is deprecated, and is therefore not part of the YAML dump
        split_info.dataset_name = None
        # the split name of split_dict takes over the name of the split info object
        split_info.name = split_name
    assert split_dict == reloaded


@pytest.mark.parametrize(
    "split_info", [SplitInfo(), SplitInfo(dataset_name=None), SplitInfo(dataset_name="my_dataset")]
)
def test_split_dict_asdict_has_dataset_name(split_info: SplitInfo):
    # For backward compatibility, we need asdict(split_dict) to return split info dictionaries with the
    # "dataset_name" field even if it's deprecated. This way old versions of `datasets` can still reload
    # dataset_infos.json files.
    split_dict_asdict = asdict(SplitDict({"train": split_info}))
    assert "dataset_name" in split_dict_asdict["train"]
    assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
import unittest
from transformers import SPIECE_UNDERLINE
from transformers.models.speecht5 import SpeechT5Tokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.tokenization_utils import AddedToken
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('''fixtures/test_sentencepiece_bpe_char.model''')
@require_sentencepiece
@require_tokenizers
class SpeechT5TokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = SpeechT5Tokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = SpeechT5Tokenizer(SAMPLE_VOCAB)

        mask_token = AddedToken("<mask>", lstrip=True, rstrip=False)
        tokenizer.mask_token = mask_token
        tokenizer.add_special_tokens({"mask_token": mask_token})
        tokenizer.add_tokens(["<ctc_blank>"])

        tokenizer.save_pretrained(self.tmpdirname)
    def get_input_output_texts(self, tokenizer):
        input_text = "this is a test"
        output_text = "this is a test"
        return input_text, output_text

    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5):
        input_text, output_text = self.get_input_output_texts(tokenizer)
        ids = tokenizer.encode(output_text, add_special_tokens=False)
        text = tokenizer.decode(ids, clean_up_tokenization_spaces=False)
        return text, ids
    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-4], "œ")
        self.assertEqual(vocab_keys[-2], "<mask>")
        self.assertEqual(vocab_keys[-1], "<ctc_blank>")
        self.assertEqual(len(vocab_keys), 81)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 79)
    def test_add_tokens_tokenizer(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                vocab_size = tokenizer.vocab_size
                all_size = len(tokenizer)

                self.assertNotEqual(vocab_size, 0)

                # We usually have added tokens from the start in tests because our vocab fixtures are
                # smaller than the original vocabs - let's not assert this
                # self.assertEqual(vocab_size, all_size)

                new_toks = ["aaaaa bbbbbb", "cccccccccdddddddd"]
                added_toks = tokenizer.add_tokens(new_toks)
                vocab_size_2 = tokenizer.vocab_size
                all_size_2 = len(tokenizer)

                self.assertNotEqual(vocab_size_2, 0)
                self.assertEqual(vocab_size, vocab_size_2)
                self.assertEqual(added_toks, len(new_toks))
                self.assertEqual(all_size_2, all_size + len(new_toks))

                tokens = tokenizer.encode("aaaaa bbbbbb low cccccccccdddddddd l", add_special_tokens=False)

                self.assertGreaterEqual(len(tokens), 4)
                self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[-3], tokenizer.vocab_size - 1)

                new_toks_2 = {"eos_token": ">>>>|||<||<<|<<", "pad_token": "<<<<<|||>|>>>>|>"}
                added_toks_2 = tokenizer.add_special_tokens(new_toks_2)
                vocab_size_3 = tokenizer.vocab_size
                all_size_3 = len(tokenizer)

                self.assertNotEqual(vocab_size_3, 0)
                self.assertEqual(vocab_size, vocab_size_3)
                self.assertEqual(added_toks_2, len(new_toks_2))
                self.assertEqual(all_size_3, all_size_2 + len(new_toks_2))

                tokens = tokenizer.encode(
                    ">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l", add_special_tokens=False
                )

                self.assertGreaterEqual(len(tokens), 6)
                self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[0], tokens[1])
                self.assertGreater(tokens[-3], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[-3], tokens[-4])
                self.assertEqual(tokens[0], tokenizer.eos_token_id)
                self.assertEqual(tokens[-3], tokenizer.pad_token_id)
    def test_pickle_subword_regularization_tokenizer(self):
        pass
    def test_subword_regularization_tokenizer(self):
        pass
    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()

        tokens = tokenizer.tokenize("This is a test")
        # fmt: off
        self.assertListEqual(tokens, [SPIECE_UNDERLINE, 'T', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'a', SPIECE_UNDERLINE, 't', 'e', 's', 't'])
        # fmt: on
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6]
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens, [SPIECE_UNDERLINE, 'I', SPIECE_UNDERLINE, 'w', 'a', 's', SPIECE_UNDERLINE, 'b', 'o', 'r', 'n', SPIECE_UNDERLINE, 'i', 'n', SPIECE_UNDERLINE, '92000', ',', SPIECE_UNDERLINE, 'a', 'n', 'd', SPIECE_UNDERLINE, 't', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'f', 'a', 'l', 's', 'é', '.'])

        ids = tokenizer.convert_tokens_to_ids(tokens)
        # fmt: off
        self.assertListEqual(ids, [4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26])
        # fmt: on

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, [SPIECE_UNDERLINE, 'I', SPIECE_UNDERLINE, 'w', 'a', 's', SPIECE_UNDERLINE, 'b', 'o', 'r', 'n', SPIECE_UNDERLINE, 'i', 'n', SPIECE_UNDERLINE, '<unk>', ',', SPIECE_UNDERLINE, 'a', 'n', 'd', SPIECE_UNDERLINE, 't', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'f', 'a', 'l', 's', 'é', '.'])
    @slow
    def test_tokenizer_integration(self):
        sequences = [
'''Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides '''
'''general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) for Natural '''
'''Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained '''
'''models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.''',
'''BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly '''
'''conditioning on both left and right context in all layers.''',
'''The quick brown fox jumps over the lazy dog.''',
]
# fmt: off
        expected_encoding = {
'''input_ids''': [
[4, 32, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 64, 19, 8, 13, 18, 5, 13, 15, 22, 4, 28, 9, 8, 20, 9, 4, 7, 12, 4, 24, 22, 6, 8, 13, 17, 11, 39, 6, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 7, 9, 14, 4, 24, 22, 6, 8, 13, 17, 11, 39, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 39, 25, 5, 13, 6, 63, 4, 24, 13, 8, 27, 10, 14, 5, 12, 4, 21, 5, 9, 5, 13, 7, 15, 39, 24, 16, 13, 24, 8, 12, 5, 4, 7, 13, 17, 11, 10, 6, 5, 17, 6, 16, 13, 5, 12, 4, 64, 40, 47, 54, 32, 23, 4, 53, 49, 32, 23, 4, 54, 8, 40, 47, 54, 32, 7, 23, 4, 69, 52, 43, 23, 4, 51, 10, 12, 6, 10, 15, 40, 5, 13, 6, 23, 4, 69, 52, 48, 5, 6, 26, 26, 26, 63, 4, 19, 8, 13, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 61, 9, 14, 5, 13, 12, 6, 7, 9, 14, 10, 9, 21, 4, 64, 48, 52, 61, 63, 4, 7, 9, 14, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 53, 5, 9, 5, 13, 7, 6, 10, 8, 9, 4, 64, 48, 52, 53, 63, 4, 20, 10, 6, 11, 4, 8, 27, 5, 13, 4, 6, 11, 10, 13, 6, 22, 39, 6, 20, 8, 4, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 4, 18, 8, 14, 5, 15, 12, 4, 10, 9, 4, 8, 9, 5, 4, 11, 16, 9, 14, 13, 5, 14, 4, 24, 15, 16, 12, 4, 15, 7, 9, 21, 16, 7, 21, 5, 12, 4, 7, 9, 14, 4, 14, 5, 5, 24, 4, 10, 9, 6, 5, 13, 8, 24, 5, 13, 7, 25, 10, 15, 10, 6, 22, 4, 25, 5, 6, 20, 5, 5, 9, 4, 58, 7, 37, 23, 4, 49, 22, 32, 8, 13, 17, 11, 4, 7, 9, 14, 4, 32, 5, 9, 12, 8, 13, 55, 15, 8, 20, 26, 2],
[4, 40, 47, 54, 32, 4, 10, 12, 4, 14, 5, 12, 10, 21, 9, 5, 14, 4, 6, 8, 4, 24, 13, 5, 39, 6, 13, 7, 10, 9, 4, 14, 5, 5, 24, 4, 25, 10, 14, 10, 13, 5, 17, 6, 10, 8, 9, 7, 15, 4, 13, 5, 24, 13, 5, 12, 5, 9, 6, 7, 6, 10, 8, 9, 12, 4, 19, 13, 8, 18, 4, 16, 9, 15, 7, 25, 5, 15, 5, 14, 4, 6, 5, 37, 6, 4, 25, 22, 4, 46, 8, 10, 9, 6, 15, 22, 4, 17, 8, 9, 14, 10, 6, 10, 8, 9, 10, 9, 21, 4, 8, 9, 4, 25, 8, 6, 11, 4, 15, 5, 19, 6, 4, 7, 9, 14, 4, 13, 10, 21, 11, 6, 4, 17, 8, 9, 6, 5, 37, 6, 4, 10, 9, 4, 7, 15, 15, 4, 15, 7, 22, 5, 13, 12, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[4, 32, 11, 5, 4, 45, 16, 10, 17, 28, 4, 25, 13, 8, 20, 9, 4, 19, 8, 37, 4, 46, 16, 18, 24, 12, 4, 8, 27, 5, 13, 4, 6, 11, 5, 4, 15, 7, 57, 22, 4, 14, 8, 21, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
],
'''attention_mask''': [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
}
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="microsoft/speecht5_asr",
            revision="c5ef64c71905caeccde0e4462ef3f9077224c524",
            sequences=sequences,
        )
import unittest
from transformers import load_tool
from transformers.utils import is_torch_available
if is_torch_available():
import torch
from transformers.testing_utils import require_torch
from .test_tools_common import ToolTesterMixin
@require_torch
class TextToSpeechToolTester(unittest.TestCase, ToolTesterMixin):
    def setUp(self):
        self.tool = load_tool("text-to-speech")
        self.tool.setup()

    def test_exact_match_arg(self):
        torch.manual_seed(0)
        result = self.tool("hey")
        resulting_tensor = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3],
                torch.tensor([-0.0_005_966_668_832_115_829, -0.0_003_657_640_190_795_064, -0.00_013_439_502_799_883_485]),
            )
        )

    def test_exact_match_kwarg(self):
        torch.manual_seed(0)
        result = self.tool(text="hey")
        resulting_tensor = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3],
                torch.tensor([-0.0_005_966_668_832_115_829, -0.0_003_657_640_190_795_064, -0.00_013_439_502_799_883_485]),
            )
        )
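# Illustrative sketch of how the tool under test is used outside the test suite
# (assumes the "text-to-speech" tool and its checkpoint can be downloaded):
#
#     from transformers import load_tool
#
#     tool = load_tool("text-to-speech")
#     tool.setup()
#     audio = tool("hey").to_raw()  # 1-D torch tensor of waveform samples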
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class ScoreSdeVePipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = ScoreSdeVeScheduler()

        sde_ve = ScoreSdeVePipeline(unet=unet, scheduler=scheduler)
        sde_ve.to(torch_device)
        sde_ve.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = sde_ve(num_inference_steps=2, output_type="numpy", generator=generator).images

        generator = torch.manual_seed(0)
        image_from_tuple = sde_ve(num_inference_steps=2, output_type="numpy", generator=generator, return_dict=False)[
            0
        ]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch
class ScoreSdeVePipelineIntegrationTests(unittest.TestCase):
    def test_inference(self):
        model_id = "google/ncsnpp-church-256"
        model = UNet2DModel.from_pretrained(model_id)
        scheduler = ScoreSdeVeScheduler.from_pretrained(model_id)

        sde_ve = ScoreSdeVePipeline(unet=model, scheduler=scheduler)
        sde_ve.to(torch_device)
        sde_ve.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = sde_ve(num_inference_steps=10, output_type="numpy", generator=generator).images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
'''simple docstring'''
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def check_model_parameters(model_a, model_b, did_step, iteration):
'''simple docstring'''
for param, grad_param in zip(model_a.parameters() , model_b.parameters() ):
if not param.requires_grad:
continue
if not did_step:
# Grads should not be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is False
), f"""Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})"""
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is True
), f"""Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})"""
def step_model(model, input, target, accelerator, do_backward=True):
'''simple docstring'''
model.train()
UpperCAmelCase_ = model(snake_case_ )
UpperCAmelCase_ = F.mse_loss(snake_case_ , target.to(output.device ) )
if not do_backward:
loss /= accelerator.gradient_accumulation_steps
loss.backward()
else:
accelerator.backward(snake_case_ )
def get_training_setup(accelerator, sched=False):
'''simple docstring'''
set_seed(42 )
UpperCAmelCase_ = RegressionModel()
UpperCAmelCase_ = deepcopy(snake_case_ )
UpperCAmelCase_ = RegressionDataset(length=80 )
UpperCAmelCase_ = DataLoader(snake_case_ , batch_size=16 )
model.to(accelerator.device )
if sched:
UpperCAmelCase_ = AdamW(params=model.parameters() , lr=1E-3 )
UpperCAmelCase_ = AdamW(params=ddp_model.parameters() , lr=1E-3 )
UpperCAmelCase_ = LambdaLR(snake_case_ , lr_lambda=lambda snake_case_ : epoch**0.65 )
UpperCAmelCase_ = LambdaLR(snake_case_ , lr_lambda=lambda snake_case_ : epoch**0.65 )
# Make a copy of `model`
if sched:
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = accelerator.prepare(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
else:
UpperCAmelCase_ , UpperCAmelCase_ = accelerator.prepare(snake_case_ , snake_case_ )
if sched:
return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
return model, ddp_model, dataloader
def test_noop_sync(accelerator):
'''simple docstring'''
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = get_training_setup(snake_case_ )
# Use a single batch
UpperCAmelCase_ , UpperCAmelCase_ = next(iter(snake_case_ ) ).values()
for iteration in range(3 ):
# Gather the distributed inputs and targs for the base model
UpperCAmelCase_ , UpperCAmelCase_ = accelerator.gather((ddp_input, ddp_target) )
UpperCAmelCase_ , UpperCAmelCase_ = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(snake_case_ ):
step_model(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
else:
# Sync grads
step_model(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
# Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
check_model_parameters(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
assert torch.allclose(
param.grad , ddp_param.grad ), f"""Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"""
# Shuffle ddp_input on each iteration
torch.manual_seed(13_37 + iteration )
UpperCAmelCase_ = ddp_input[torch.randperm(len(snake_case_ ) )]
def test_distributed_sync(accelerator):
'''simple docstring'''
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = get_training_setup(snake_case_ )
# Use a single batch
UpperCAmelCase_ , UpperCAmelCase_ = next(iter(snake_case_ ) ).values()
for iteration in range(3 ):
# Gather the distributed inputs and targs for the base model
UpperCAmelCase_ , UpperCAmelCase_ = accelerator.gather((ddp_input, ddp_target) )
UpperCAmelCase_ , UpperCAmelCase_ = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(snake_case_ ):
step_model(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
else:
# Sync grads
step_model(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
if iteration % 2 == 0:
# Grads should not be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is False
), f"""Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"""
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is True
), f"""Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"""
# Shuffle ddp_input on each iteration
torch.manual_seed(13_37 + iteration )
UpperCAmelCase_ = ddp_input[torch.randperm(len(snake_case_ ) )]
def test_gradient_accumulation(split_batches=False, dispatch_batches=False):
'''simple docstring'''
UpperCAmelCase_ = Accelerator(
split_batches=snake_case_ , dispatch_batches=snake_case_ , gradient_accumulation_steps=2 )
# Test that context manager behaves properly
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = get_training_setup(snake_case_ )
for iteration, batch in enumerate(snake_case_ ):
UpperCAmelCase_ , UpperCAmelCase_ = batch.values()
# Gather the distributed inputs and targs for the base model
UpperCAmelCase_ , UpperCAmelCase_ = accelerator.gather((ddp_input, ddp_target) )
UpperCAmelCase_ , UpperCAmelCase_ = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ )
# Do "gradient accumulation" (noop)
with accelerator.accumulate(snake_case_ ):
step_model(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
if ((iteration + 1) % 2 == 0) or (iteration == len(snake_case_ ) - 1):
# Grads should be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is True
), f"""Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"""
else:
# Grads should not be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is False
), f"""Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"""
# Shuffle ddp_input on each iteration
torch.manual_seed(13_37 + iteration )
UpperCAmelCase_ = ddp_input[torch.randperm(len(snake_case_ ) )]
GradientState._reset_state()
def test_gradient_accumulation_with_opt_and_scheduler(split_batches=False, dispatch_batches=False):
'''simple docstring'''
UpperCAmelCase_ = Accelerator(
split_batches=snake_case_ , dispatch_batches=snake_case_ , gradient_accumulation_steps=2 )
# Test that context manager behaves properly
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = get_training_setup(snake_case_ , snake_case_ )
for iteration, batch in enumerate(snake_case_ ):
UpperCAmelCase_ , UpperCAmelCase_ = batch.values()
# Gather the distributed inputs and targs for the base model
UpperCAmelCase_ , UpperCAmelCase_ = accelerator.gather((ddp_input, ddp_target) )
UpperCAmelCase_ , UpperCAmelCase_ = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
model.train()
ddp_model.train()
step_model(snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ )
opt.step()
if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(snake_case_ )):
if split_batches:
sched.step()
else:
for _ in range(accelerator.num_processes ):
sched.step()
opt.zero_grad()
# Perform gradient accumulation under wrapper
with accelerator.accumulate(snake_case_ ):
step_model(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
ddp_opt.step()
ddp_sched.step()
ddp_opt.zero_grad()
# Learning rates should be the same
assert (
opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
), f"""Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]["lr"]}\nDDP opt: {ddp_opt.param_groups[0]["lr"]}\n"""
UpperCAmelCase_ = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(snake_case_ ))
if accelerator.num_processes > 1:
check_model_parameters(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
# Shuffle ddp_input on each iteration
torch.manual_seed(13_37 + iteration )
GradientState._reset_state()
def test_dataloader_break():
'''simple docstring'''
UpperCAmelCase_ = Accelerator()
UpperCAmelCase_ = RegressionDataset(length=80 )
UpperCAmelCase_ = DataLoader(snake_case_ , batch_size=16 )
UpperCAmelCase_ = RegressionDataset(length=96 )
UpperCAmelCase_ = DataLoader(snake_case_ , batch_size=16 )
UpperCAmelCase_ , UpperCAmelCase_ = accelerator.prepare(snake_case_ , snake_case_ )
assert accelerator.gradient_state.active_dataloader is None
for iteration, _ in enumerate(snake_case_ ):
assert id(accelerator.gradient_state.active_dataloader ) == id(snake_case_ )
if iteration < len(snake_case_ ) - 1:
assert not accelerator.gradient_state.end_of_dataloader
if iteration == 1:
for batch_num, _ in enumerate(snake_case_ ):
assert id(accelerator.gradient_state.active_dataloader ) == id(snake_case_ )
if batch_num < len(snake_case_ ) - 1:
assert not accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
assert accelerator.gradient_state.active_dataloader is None
def main():
'''simple docstring'''
UpperCAmelCase_ = Accelerator()
UpperCAmelCase_ = accelerator.state
if state.local_process_index == 0:
print("**Test `accumulate` gradient accumulation with dataloader break**" )
test_dataloader_break()
if state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print("**Test NOOP `no_sync` context manager**" )
test_noop_sync(snake_case_ )
if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
if state.local_process_index == 0:
print("**Test Distributed `no_sync` context manager**" )
test_distributed_sync(snake_case_ )
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if state.local_process_index == 0:
print(
"**Test `accumulate` gradient accumulation, " , f"""`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**""" , )
test_gradient_accumulation(snake_case_ , snake_case_ )
# Currently will break on torch 2.0 +, need to investigate why
if is_torch_version("<" , "2.0" ) or state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print(
"**Test `accumulate` gradient accumulation with optimizer and scheduler, " , "`split_batches=False`, `dispatch_batches=False`**" , )
test_gradient_accumulation_with_opt_and_scheduler()
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if not split_batch and not dispatch_batches:
continue
if state.local_process_index == 0:
print(
"**Test `accumulate` gradient accumulation with optimizer and scheduler, " , f"""`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**""" , )
test_gradient_accumulation_with_opt_and_scheduler(snake_case_ , snake_case_ )
def _mp_fn(index):
'''simple docstring'''
main()
if __name__ == "__main__":
main()
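# Condensed sketch of the gradient-accumulation pattern these tests exercise
# (illustrative; `compute_loss` is a placeholder for the loss computation):
#
#     accelerator = Accelerator(gradient_accumulation_steps=2)
#     model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)
#     for batch in dataloader:
#         with accelerator.accumulate(model):
#             loss = compute_loss(model, batch)
#             accelerator.backward(loss)
#             optimizer.step()
#             optimizer.zero_grad()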
'''simple docstring'''
__all__ = [
"""DownloadConfig""",
"""DownloadManager""",
"""DownloadMode""",
"""StreamingDownloadManager""",
]
from .download_config import DownloadConfig
from .download_manager import DownloadManager, DownloadMode
from .streaming_download_manager import StreamingDownloadManager
'''simple docstring'''
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotImageClassificationPipeline(Pipeline):
    """Zero-shot image classification pipeline: scores an image against a set of candidate text labels."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
        )

    def __call__(self, images, **kwargs):
        return super().__call__(images, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]
        return preprocess_params, {}, {}

    def preprocess(self, image, candidate_labels=None, hypothesis_template="This is a photo of {}."):
        image = load_image(image)
        inputs = self.image_processor(images=[image], return_tensors=self.framework)
        inputs["candidate_labels"] = candidate_labels
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs["text_inputs"] = [text_inputs]
        return inputs

    def _forward(self, model_inputs):
        candidate_labels = model_inputs.pop("candidate_labels")
        text_inputs = model_inputs.pop("text_inputs")
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]

        outputs = self.model(**text_inputs, **model_inputs)

        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_image,
        }
        return model_outputs

    def postprocess(self, model_outputs):
        candidate_labels = model_outputs.pop("candidate_labels")
        logits = model_outputs["logits"][0]
        if self.framework == "pt":
            probs = logits.softmax(dim=-1).squeeze(-1)
            scores = probs.tolist()
            if not isinstance(scores, list):
                scores = [scores]
        elif self.framework == "tf":
            probs = stable_softmax(logits, axis=-1)
            scores = probs.numpy().tolist()
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
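# Illustrative end-to-end usage of this pipeline (the checkpoint name is just an
# example of a CLIP-style model):
#
#     from transformers import pipeline
#
#     classifier = pipeline(
#         task="zero-shot-image-classification", model="openai/clip-vit-base-patch32"
#     )
#     classifier("cat.png", candidate_labels=["a photo of a cat", "a photo of a dog"])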
from itertools import product
def total_frequency_distribution(sides_number: int, dice_number: int) -> list[int]:
    """Count, for every possible total, how many ways it can be rolled with the given dice."""
    max_face_number = sides_number
    max_total_value = max_face_number * dice_number
    totals_frequencies = [0] * (max_total_value + 1)

    min_face_number = 1
    faces_numbers = range(min_face_number, max_face_number + 1)
    for dice_numbers in product(faces_numbers, repeat=dice_number):
        total = sum(dice_numbers)
        totals_frequencies[total] += 1

    return totals_frequencies


def solution() -> float:
    """Probability that Peter (nine 4-sided dice) beats Colin (six 6-sided dice)."""
    peter_totals_frequencies = total_frequency_distribution(sides_number=4, dice_number=9)
    colin_totals_frequencies = total_frequency_distribution(sides_number=6, dice_number=6)

    peter_wins_count = 0
    min_peter_total = 9
    max_peter_total = 4 * 9

    min_colin_total = 6
    for peter_total in range(min_peter_total, max_peter_total + 1):
        peter_wins_count += peter_totals_frequencies[peter_total] * sum(
            colin_totals_frequencies[min_colin_total:peter_total]
        )

    total_games_number = (4**9) * (6**6)
    peter_win_probability = peter_wins_count / total_games_number

    rounded_peter_win_probability = round(peter_win_probability, ndigits=7)

    return rounded_peter_win_probability
if __name__ == "__main__":
print(F'''{solution() = }''')
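# Optional Monte Carlo cross-check of the exact computation above (illustrative;
# agreement with solution() is statistical, not exact):
def _simulate_peter_win_probability(games: int = 100_000) -> float:
    import random

    wins = 0
    for _ in range(games):
        peter = sum(random.randint(1, 4) for _ in range(9))
        colin = sum(random.randint(1, 6) for _ in range(6))
        wins += peter > colin
    return wins / games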
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "monolingual_vocab_file": "dict.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model",
    },
    "monolingual_vocab_file": {
        "vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"vinai/bartpho-syllable": 1_024}
class BartphoTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
def __init__( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase="<s>" , __UpperCamelCase="</s>" , __UpperCamelCase="</s>" , __UpperCamelCase="<s>" , __UpperCamelCase="<unk>" , __UpperCamelCase="<pad>" , __UpperCamelCase="<mask>" , __UpperCamelCase = None , **__UpperCamelCase , ):
"""simple docstring"""
UpperCamelCase_ = AddedToken(__UpperCamelCase , lstrip=__UpperCamelCase , rstrip=__UpperCamelCase ) if isinstance(__UpperCamelCase , __UpperCamelCase ) else mask_token
UpperCamelCase_ = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=__UpperCamelCase , eos_token=__UpperCamelCase , unk_token=__UpperCamelCase , sep_token=__UpperCamelCase , cls_token=__UpperCamelCase , pad_token=__UpperCamelCase , mask_token=__UpperCamelCase , sp_model_kwargs=self.sp_model_kwargs , **__UpperCamelCase , )
UpperCamelCase_ = vocab_file
UpperCamelCase_ = monolingual_vocab_file
UpperCamelCase_ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(__UpperCamelCase ) )
# Load the reduced vocab
# Keep order of special tokens for backward compatibility
UpperCamelCase_ = {}
UpperCamelCase_ = 0
for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
if str(__UpperCamelCase ) not in self.fairseq_tokens_to_ids:
UpperCamelCase_ = cnt
cnt += 1
with open(__UpperCamelCase , """r""" , encoding="""utf-8""" ) as f:
for line in f.readlines():
UpperCamelCase_ = line.strip().split()[0]
UpperCamelCase_ = len(self.fairseq_tokens_to_ids )
if str(__UpperCamelCase ) not in self.fairseq_tokens_to_ids:
UpperCamelCase_ = len(self.fairseq_tokens_to_ids )
UpperCamelCase_ = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self ):
"""simple docstring"""
UpperCamelCase_ = self.__dict__.copy()
UpperCamelCase_ = None
UpperCamelCase_ = self.sp_model.serialized_model_proto()
return state
def __setstate__( self , __UpperCamelCase ):
"""simple docstring"""
UpperCamelCase_ = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
UpperCamelCase_ = {}
UpperCamelCase_ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def lowerCamelCase_ ( self , __UpperCamelCase , __UpperCamelCase = None ):
"""simple docstring"""
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
UpperCamelCase_ = [self.cls_token_id]
UpperCamelCase_ = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def lowerCamelCase_ ( self , __UpperCamelCase , __UpperCamelCase = None , __UpperCamelCase = False ):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__UpperCamelCase , token_ids_a=__UpperCamelCase , already_has_special_tokens=__UpperCamelCase )
if token_ids_a is None:
return [1] + ([0] * len(__UpperCamelCase )) + [1]
return [1] + ([0] * len(__UpperCamelCase )) + [1, 1] + ([0] * len(__UpperCamelCase )) + [1]
def lowerCamelCase_ ( self , __UpperCamelCase , __UpperCamelCase = None ):
"""simple docstring"""
UpperCamelCase_ = [self.sep_token_id]
UpperCamelCase_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def lowerCamelCase_ ( self ):
"""simple docstring"""
return len(self.fairseq_ids_to_tokens )
def lowerCamelCase_ ( self ):
"""simple docstring"""
UpperCamelCase_ = {self.convert_ids_to_tokens(__UpperCamelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def lowerCamelCase_ ( self , __UpperCamelCase ):
"""simple docstring"""
return self.sp_model.encode(__UpperCamelCase , out_type=__UpperCamelCase )
def lowerCamelCase_ ( self , __UpperCamelCase ):
"""simple docstring"""
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
else:
return self.unk_token_id
def lowerCamelCase_ ( self , __UpperCamelCase ):
"""simple docstring"""
return self.fairseq_ids_to_tokens[index]
def lowerCamelCase_ ( self , __UpperCamelCase ):
"""simple docstring"""
UpperCamelCase_ = """""".join(__UpperCamelCase ).replace(__UpperCamelCase , """ """ ).strip()
return out_string
def lowerCamelCase_ ( self , __UpperCamelCase , __UpperCamelCase = None ):
"""simple docstring"""
if not os.path.isdir(__UpperCamelCase ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
UpperCamelCase_ = os.path.join(
__UpperCamelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
UpperCamelCase_ = os.path.join(
__UpperCamelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""monolingual_vocab_file"""] , )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCamelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __UpperCamelCase )
elif not os.path.isfile(self.vocab_file ):
with open(__UpperCamelCase , """wb""" ) as fi:
UpperCamelCase_ = self.sp_model.serialized_model_proto()
fi.write(__UpperCamelCase )
if os.path.abspath(self.monolingual_vocab_file ) != os.path.abspath(
__UpperCamelCase ) and os.path.isfile(self.monolingual_vocab_file ):
copyfile(self.monolingual_vocab_file , __UpperCamelCase )
elif not os.path.isfile(self.monolingual_vocab_file ):
with open(__UpperCamelCase , """w""" , encoding="""utf-8""" ) as fp:
for token in self.fairseq_tokens_to_ids:
if token not in self.all_special_tokens:
fp.write(f'''{str(__UpperCamelCase )} \n''' )
return out_vocab_file, out_monolingual_vocab_file
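# Illustrative usage (requires the sentencepiece package and Hub access to the
# "vinai/bartpho-syllable" checkpoint referenced in the maps above):
#
#     from transformers import AutoTokenizer
#
#     tokenizer = AutoTokenizer.from_pretrained("vinai/bartpho-syllable")
#     input_ids = tokenizer("Chúng tôi là những nghiên cứu viên.")["input_ids"]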
'''simple docstring'''
from __future__ import annotations
def get_valid_pos(position: tuple[int, int], n: int) -> list[tuple[int, int]]:
    """Find all the valid positions a knight can move to from the current position."""
    y, x = position
    positions = [
        (y + 1, x + 2),
        (y - 1, x + 2),
        (y + 1, x - 2),
        (y - 1, x - 2),
        (y + 2, x + 1),
        (y + 2, x - 1),
        (y - 2, x + 1),
        (y - 2, x - 1),
    ]
    permissible_positions = []

    for inner_position in positions:
        y_test, x_test = inner_position
        if 0 <= y_test < n and 0 <= x_test < n:
            permissible_positions.append(inner_position)

    return permissible_positions


def is_complete(board: list[list[int]]) -> bool:
    """Check if the board (matrix) has been completely filled with non-zero values."""
    return not any(elem == 0 for row in board for elem in row)


def open_knight_tour_helper(board: list[list[int]], pos: tuple[int, int], curr: int) -> bool:
    """Helper function for the knight tour backtracking search."""
    if is_complete(board):
        return True

    for position in get_valid_pos(pos, len(board)):
        y, x = position
        if board[y][x] == 0:
            board[y][x] = curr + 1
            if open_knight_tour_helper(board, position, curr + 1):
                return True
            board[y][x] = 0

    return False


def open_knight_tour(n: int) -> list[list[int]]:
    """Find a solution for the open knight tour problem on a board of size n."""
    board = [[0 for i in range(n)] for j in range(n)]

    for i in range(n):
        for j in range(n):
            board[i][j] = 1
            if open_knight_tour_helper(board, (i, j), 1):
                return board
            board[i][j] = 0

    msg = f"Open Knight Tour cannot be performed on a board of size {n}"
    raise ValueError(msg)
if __name__ == "__main__":
import doctest
doctest.testmod()
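# Quick illustrative check of the solver above (a 5x5 board is small enough for
# the brute-force search to finish quickly):
def _knight_tour_demo() -> None:
    board = open_knight_tour(5)
    visited = sorted(value for row in board for value in row)
    assert visited == list(range(1, 26))  # every square visited exactly once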
'''simple docstring'''
import os
from collections import deque
import torch
from torch.utils.data import Dataset
class CNNDMDataset(Dataset):
    """Abstracts the dataset used to train seq2seq models."""

    def __init__(self, path="", prefix="train"):
        assert os.path.isdir(path)

        self.documents = []
        story_filenames_list = os.listdir(path)
        for story_filename in story_filenames_list:
            if "summary" in story_filename:
                continue
            path_to_story = os.path.join(path, story_filename)
            if not os.path.isfile(path_to_story):
                continue
            self.documents.append(path_to_story)

    def __len__(self):
        """Returns the number of documents."""
        return len(self.documents)

    def __getitem__(self, idx):
        document_path = self.documents[idx]
        document_name = document_path.split("/")[-1]
        with open(document_path, encoding="utf-8") as source:
            raw_story = source.read()
            story_lines, summary_lines = process_story(raw_story)
        return document_name, story_lines, summary_lines


def process_story(raw_story):
    """Split a CNN/DailyMail story file into story lines and summary (highlight) lines."""
    nonempty_lines = list(filter(lambda x: len(x) != 0, [line.strip() for line in raw_story.split("\n")]))

    # for some unknown reason some lines miss a period, add it
    nonempty_lines = [_add_missing_period(line) for line in nonempty_lines]

    # gather article lines
    story_lines = []
    lines = deque(nonempty_lines)
    while True:
        try:
            element = lines.popleft()
            if element.startswith("@highlight"):
                break
            story_lines.append(element)
        except IndexError:
            # if "@highlight" is absent from the file we pop
            # all elements until there is None, raising an exception.
            return story_lines, []

    # gather summary lines
    summary_lines = list(filter(lambda t: not t.startswith("@highlight"), lines))

    return story_lines, summary_lines


def _add_missing_period(line):
    END_TOKENS = [".", "!", "?", "...", "'", "`", '"', "\u2019", "\u2019", ")"]
    if line.startswith("@highlight"):
        return line
    if line[-1] in END_TOKENS:
        return line
    return line + "."


def truncate_or_pad(sequence, block_size, pad_token_id):
    """Adapt the sequence length to the block size: truncate if longer, pad if shorter."""
    if len(sequence) > block_size:
        return sequence[:block_size]
    else:
        sequence.extend([pad_token_id] * (block_size - len(sequence)))
        return sequence


def build_mask(sequence, pad_token_id):
    """Build the attention mask: positions holding the pad token are masked out with 0."""
    mask = torch.ones_like(sequence)
    idx_pad_tokens = sequence == pad_token_id
    mask[idx_pad_tokens] = 0
    return mask


def encode_for_summarization(story_lines, summary_lines, tokenizer):
    """Encode the story and summary lines and flatten them into single token sequences."""
    story_lines_token_ids = [tokenizer.encode(line) for line in story_lines]
    story_token_ids = [token for sentence in story_lines_token_ids for token in sentence]
    summary_lines_token_ids = [tokenizer.encode(line) for line in summary_lines]
    summary_token_ids = [token for sentence in summary_lines_token_ids for token in sentence]
    return story_token_ids, summary_token_ids


def compute_token_type_ids(batch, separator_token_id):
    """Alternate 0/1 segment ids for each sentence, switching at every separator token."""
    batch_embeddings = []
    for sequence in batch:
        sentence_num = -1
        embeddings = []
        for s in sequence:
            if s == separator_token_id:
                sentence_num += 1
            embeddings.append(sentence_num % 2)
        batch_embeddings.append(embeddings)
    return torch.tensor(batch_embeddings)
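# Illustrative sketch of how the helpers above fit together when preparing model
# inputs (the tokenizer and block size are assumptions for the example):
#
#     story_ids, summary_ids = encode_for_summarization(story_lines, summary_lines, tokenizer)
#     story_ids = truncate_or_pad(story_ids, block_size=512, pad_token_id=tokenizer.pad_token_id)
#     batch = torch.tensor([story_ids])
#     attention_mask = build_mask(batch, tokenizer.pad_token_id)
#     token_type_ids = compute_token_type_ids(batch, tokenizer.sep_token_id)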
from __future__ import annotations
from dataclasses import dataclass
@dataclass
class TreeNode:
    data: float
    left: TreeNode | None = None
    right: TreeNode | None = None


def is_binary_search_tree(tree: TreeNode | None) -> bool:
    """Return True if the given tree is a valid binary search tree."""

    # Validation
    def is_valid_tree(node: TreeNode | None) -> bool:
        if node is None:
            return True

        if not isinstance(node, TreeNode):
            return False

        try:
            float(node.data)
        except (TypeError, ValueError):
            return False

        return is_valid_tree(node.left) and is_valid_tree(node.right)

    if not is_valid_tree(tree):
        raise ValueError(
            "Each node should be type of TreeNode and data should be float.")

    def is_binary_search_tree_recursive_check(
        node: TreeNode | None, left_bound: float, right_bound: float
    ) -> bool:
        if node is None:
            return True

        return (
            left_bound < node.data < right_bound
            and is_binary_search_tree_recursive_check(node.left, left_bound, node.data)
            and is_binary_search_tree_recursive_check(node.right, node.data, right_bound)
        )

    return is_binary_search_tree_recursive_check(tree, -float("inf"), float("inf"))
if __name__ == "__main__":
import doctest
doctest.testmod()
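# Small illustrative check of the validator above:
def _bst_demo() -> None:
    valid = TreeNode(2.0, TreeNode(1.0), TreeNode(3.0))
    invalid = TreeNode(2.0, TreeNode(3.0), TreeNode(1.0))
    assert is_binary_search_tree(valid)
    assert not is_binary_search_tree(invalid)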
import argparse
import torch
from transformers import YosoConfig, YosoForMaskedLM
def rename_key(orig_key):
    if "model" in orig_key:
        orig_key = orig_key.replace("model.", "")
    if "norm1" in orig_key:
        orig_key = orig_key.replace("norm1", "attention.output.LayerNorm")
    if "norm2" in orig_key:
        orig_key = orig_key.replace("norm2", "output.LayerNorm")
    if "norm" in orig_key:
        orig_key = orig_key.replace("norm", "LayerNorm")
    if "transformer" in orig_key:
        layer_num = orig_key.split(".")[0].split("_")[-1]
        orig_key = orig_key.replace(f"transformer_{layer_num}", f"encoder.layer.{layer_num}")
    if "mha.attn" in orig_key:
        orig_key = orig_key.replace("mha.attn", "attention.self")
    if "mha" in orig_key:
        orig_key = orig_key.replace("mha", "attention")
    if "W_q" in orig_key:
        orig_key = orig_key.replace("W_q", "self.query")
    if "W_k" in orig_key:
        orig_key = orig_key.replace("W_k", "self.key")
    if "W_v" in orig_key:
        orig_key = orig_key.replace("W_v", "self.value")
    if "ff1" in orig_key:
        orig_key = orig_key.replace("ff1", "intermediate.dense")
    if "ff2" in orig_key:
        orig_key = orig_key.replace("ff2", "output.dense")
    if "ff" in orig_key:
        orig_key = orig_key.replace("ff", "output.dense")
    if "mlm_class" in orig_key:
        orig_key = orig_key.replace("mlm.mlm_class", "cls.predictions.decoder")
    if "mlm" in orig_key:
        orig_key = orig_key.replace("mlm", "cls.predictions.transform")
    if "cls" not in orig_key:
        orig_key = "yoso." + orig_key
    return orig_key


def convert_checkpoint_helper(max_position_embeddings, orig_state_dict):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if ("pooler" in key) or ("sen_class" in key):
            continue
        else:
            orig_state_dict[rename_key(key)] = val

    orig_state_dict["cls.predictions.bias"] = orig_state_dict["cls.predictions.decoder.bias"]
    orig_state_dict["yoso.embeddings.position_ids"] = torch.arange(max_position_embeddings).expand((1, -1)) + 2

    return orig_state_dict


def convert_yoso_checkpoint(checkpoint_path, yoso_config_file, pytorch_dump_path):
    orig_state_dict = torch.load(checkpoint_path, map_location="cpu")["model_state_dict"]
    config = YosoConfig.from_json_file(yoso_config_file)
    model = YosoForMaskedLM(config)

    new_state_dict = convert_checkpoint_helper(config.max_position_embeddings, orig_state_dict)

    print(model.load_state_dict(new_state_dict))
    model.eval()
    model.save_pretrained(pytorch_dump_path)

    print(f"Checkpoint successfully converted. Model saved at {pytorch_dump_path}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--pytorch_model_path', default=None, type=str, required=True, help='Path to YOSO pytorch checkpoint.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The json file for YOSO model config.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_yoso_checkpoint(args.pytorch_model_path, args.config_file, args.pytorch_dump_path)
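# Example invocation (the script name and paths are placeholders):
#
#     python convert_yoso_original_pytorch_checkpoint_to_pytorch.py \
#         --pytorch_model_path /path/to/yoso_checkpoint.ckpt \
#         --config_file /path/to/yoso_config.json \
#         --pytorch_dump_path /path/to/output_dir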
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class DDPMSchedulerState:
    common: CommonSchedulerState

    # setable values
    init_noise_sigma: jnp.ndarray
    timesteps: jnp.ndarray
    num_inference_steps: Optional[int] = None

    @classmethod
    def create(cls, common: CommonSchedulerState, init_noise_sigma: jnp.ndarray, timesteps: jnp.ndarray):
        return cls(common=common, init_noise_sigma=init_noise_sigma, timesteps=timesteps)
@dataclass
class FlaxDDPMSchedulerOutput(FlaxSchedulerOutput):
    state: DDPMSchedulerState


class FlaxDDPMScheduler(FlaxSchedulerMixin, ConfigMixin):
    _compatibles = [e.name for e in FlaxKarrasDiffusionSchedulers]

    dtype: jnp.dtype
@property
    def has_state(self):
        return True
    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1_000,
        beta_start: float = 0.0001,
        beta_end: float = 0.02,
        beta_schedule: str = "linear",
        trained_betas: Optional[jnp.ndarray] = None,
        variance_type: str = "fixed_small",
        clip_sample: bool = True,
        prediction_type: str = "epsilon",
        dtype: jnp.dtype = jnp.float32,
    ):
        self.dtype = dtype
    def create_state(self, common: Optional[CommonSchedulerState] = None) -> DDPMSchedulerState:
        if common is None:
            common = CommonSchedulerState.create(self)

        # standard deviation of the initial noise distribution
        init_noise_sigma = jnp.array(1.0, dtype=self.dtype)

        timesteps = jnp.arange(0, self.config.num_train_timesteps).round()[::-1]

        return DDPMSchedulerState.create(
            common=common,
            init_noise_sigma=init_noise_sigma,
            timesteps=timesteps,
        )
    def scale_model_input(
        self, state: DDPMSchedulerState, sample: jnp.ndarray, timestep: Optional[int] = None
    ) -> jnp.ndarray:
        return sample
    def set_timesteps(
        self, state: DDPMSchedulerState, num_inference_steps: int, shape: Tuple = ()
    ) -> DDPMSchedulerState:
        step_ratio = self.config.num_train_timesteps // num_inference_steps
        # creates integer timesteps by multiplying by ratio
        # rounding to avoid issues when num_inference_steps is a power of 3
        timesteps = (jnp.arange(0, num_inference_steps) * step_ratio).round()[::-1]
        return state.replace(
            num_inference_steps=num_inference_steps, timesteps=timesteps,
        )
    def _get_variance(self, state: DDPMSchedulerState, t, predicted_variance=None, variance_type=None):
        alpha_prod_t = state.common.alphas_cumprod[t]
        alpha_prod_t_prev = jnp.where(t > 0, state.common.alphas_cumprod[t - 1], jnp.array(1.0, dtype=self.dtype))
        # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
        # and sample from it to get previous sample
        # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
        variance = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]
        if variance_type is None:
            variance_type = self.config.variance_type
        # hacks - were probably added for training stability
        if variance_type == "fixed_small":
            variance = jnp.clip(variance, a_min=1e-20)
        # for rl-diffuser https://arxiv.org/abs/2205.09991
        elif variance_type == "fixed_small_log":
            variance = jnp.log(jnp.clip(variance, a_min=1e-20))
        elif variance_type == "fixed_large":
            variance = state.common.betas[t]
        elif variance_type == "fixed_large_log":
            # Glide max_log
            variance = jnp.log(state.common.betas[t])
        elif variance_type == "learned":
            return predicted_variance
        elif variance_type == "learned_range":
            # the model predicts a coefficient interpolating between the min and max variance
            min_log = variance
            max_log = state.common.betas[t]
            frac = (predicted_variance + 1) / 2
            variance = frac * max_log + (1 - frac) * min_log
        return variance
    def step(
        self,
        state: DDPMSchedulerState,
        model_output: jnp.ndarray,
        timestep: int,
        sample: jnp.ndarray,
        key: Optional[jax.random.KeyArray] = None,
        return_dict: bool = True,
    ) -> Union[FlaxDDPMSchedulerOutput, Tuple]:
        t = timestep
        if key is None:
            key = jax.random.PRNGKey(0)
        if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
            model_output, predicted_variance = jnp.split(model_output, sample.shape[1], axis=1)
        else:
            predicted_variance = None
        # 1. compute alphas, betas
        alpha_prod_t = state.common.alphas_cumprod[t]
        alpha_prod_t_prev = jnp.where(t > 0, state.common.alphas_cumprod[t - 1], jnp.array(1.0, dtype=self.dtype))
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev
        # 2. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        elif self.config.prediction_type == "v_prediction":
            pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
        else:
            raise ValueError(
                f'prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` '
                '''or `v_prediction` for the FlaxDDPMScheduler.''' )
        # 3. Clip "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = jnp.clip(pred_original_sample, -1, 1)
        # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
        current_sample_coeff = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
        # 5. Compute predicted previous sample µ_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
        # 6. Add noise
        def random_variance():
            split_key = jax.random.split(key, num=1)
            noise = jax.random.normal(split_key, shape=model_output.shape, dtype=self.dtype)
            return (self._get_variance(state, t, predicted_variance=predicted_variance) ** 0.5) * noise
        variance = jnp.where(t > 0, random_variance(), jnp.zeros(model_output.shape, dtype=self.dtype))
        pred_prev_sample = pred_prev_sample + variance
        if not return_dict:
            return (pred_prev_sample, state)
        return FlaxDDPMSchedulerOutput(prev_sample=pred_prev_sample, state=state)
    def add_noise(
        self,
        state: DDPMSchedulerState,
        original_samples: jnp.ndarray,
        noise: jnp.ndarray,
        timesteps: jnp.ndarray,
    ) -> jnp.ndarray:
        return add_noise_common(state.common, original_samples, noise, timesteps)
    def get_velocity(
        self,
        state: DDPMSchedulerState,
        sample: jnp.ndarray,
        noise: jnp.ndarray,
        timesteps: jnp.ndarray,
    ) -> jnp.ndarray:
        return get_velocity_common(state.common, sample, noise, timesteps)
    def __len__(self) -> int:
        return self.config.num_train_timesteps
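# Minimal usage sketch (a non-authoritative illustration of the scheduler API above):
#   scheduler = FlaxDDPMScheduler(num_train_timesteps=1_000)
#   state = scheduler.create_state()
#   state = scheduler.set_timesteps(state, num_inference_steps=50)
#   # each denoising iteration then calls scheduler.step(state, model_output, t, sample)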
| 350 |
import logging
from transformers import PretrainedConfig
lowerCAmelCase = logging.getLogger(__name__)
lowerCAmelCase = {
'bertabs-finetuned-cnndm': 'https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json',
}
class BertAbsConfig( PretrainedConfig ):
    model_type = '''bertabs'''
    def __init__(
        self,
        vocab_size=30_522,
        max_pos=512,
        enc_layers=6,
        enc_hidden_size=512,
        enc_heads=8,
        enc_ff_size=512,
        enc_dropout=0.2,
        dec_layers=6,
        dec_hidden_size=768,
        dec_heads=8,
        dec_ff_size=2_048,
        dec_dropout=0.2,
        **kwargs,
    ):
        super().__init__(**kwargs )
        self.vocab_size = vocab_size
        self.max_pos = max_pos
        self.enc_layers = enc_layers
        self.enc_hidden_size = enc_hidden_size
        self.enc_heads = enc_heads
        self.enc_ff_size = enc_ff_size
        self.enc_dropout = enc_dropout
        self.dec_layers = dec_layers
        self.dec_hidden_size = dec_hidden_size
        self.dec_heads = dec_heads
        self.dec_ff_size = dec_ff_size
        self.dec_dropout = dec_dropout
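# Minimal usage sketch (values are illustrative):
#   config = BertAbsConfig(dec_layers=6, dec_hidden_size=768)
#   assert config.dec_hidden_size == 768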
| 93 | 0 |
'''simple docstring'''
import json
import os
import unittest
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class BioGptTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = BioGptTokenizer
    test_rust_tokenizer = False
    def setUp(self):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"w</w>",
"r</w>",
"t</w>",
"lo",
"low",
"er</w>",
"low</w>",
"lowest</w>",
"newer</w>",
"wider</w>",
"<unk>",
]
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
        with open(self.vocab_file , "w" ) as fp:
            fp.write(json.dumps(vocab_tokens ) )
        with open(self.merges_file , "w" ) as fp:
            fp.write("\n".join(merges ) )
    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = BioGptTokenizer(self.vocab_file , self.merges_file )
        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text )
        self.assertListEqual(tokens , bpe_tokens )
        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , input_bpe_tokens )
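    # Note: the expected ids [14, 15, 20] follow from the toy vocabulary built in setUp,
    # where list order defines each token's id: "low" -> 14, "er</w>" -> 15, "<unk>" -> 20.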
@slow
    def test_sequence_builders(self):
        tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt" )
        text = tokenizer.encode("sequence builders" , add_special_tokens=False )
        text_2 = tokenizer.encode("multi-sequence build" , add_special_tokens=False )
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_2 )
        self.assertTrue(encoded_sentence == [2] + text )
        self.assertTrue(encoded_pair == [2] + text + [2] + text_2 )
| 1 |
"""simple docstring"""
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_flax_cross_test,
require_flax,
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import is_flax_available, is_torch_available, is_vision_available
from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_flax_bert import FlaxBertModelTester
from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester
from ..vit.test_modeling_flax_vit import FlaxViTModelTester
if is_flax_available():
from transformers import (
FlaxBertModel,
FlaxCLIPVisionModel,
FlaxVisionTextDualEncoderModel,
FlaxViTModel,
VisionTextDualEncoderConfig,
VisionTextDualEncoderProcessor,
)
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
if is_torch_available():
import torch
from transformers import VisionTextDualEncoderModel
if is_vision_available():
from PIL import Image
def to_atuple(x):
    if isinstance(x , collections.abc.Iterable ):
        return x
    return (x, x)
@require_flax
class VisionTextDualEncoderMixin:
    def get_vision_text_model(self, vision_config, text_config):
        pass
    def prepare_config_and_inputs(self):
        pass
    def get_pretrained_model_and_inputs(self):
        pass
    def assert_almost_equals(self, a: np.ndarray, b: np.ndarray, tol: float):
        diff = np.abs(a - b ).max()
        self.assertLessEqual(diff , tol , F"""Difference between torch and flax is {diff} (>= {tol}).""")
    def check_model_from_pretrained_configs(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config , text_config)
        model = FlaxVisionTextDualEncoderModel(config)
        output = model(input_ids=input_ids , pixel_values=pixel_values , attention_mask=attention_mask)
        self.assertEqual(output["text_embeds"].shape , (input_ids.shape[0], config.projection_dim))
        self.assertEqual(output["image_embeds"].shape , (pixel_values.shape[0], config.projection_dim))
    def check_vision_text_dual_encoder_from_pretrained(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config , text_config)
        kwargs = {"vision_model": vision_model, "text_model": text_model}
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)
        output = model(input_ids=input_ids , pixel_values=pixel_values , attention_mask=attention_mask)
        self.assertEqual(output["text_embeds"].shape , (input_ids.shape[0], model.config.projection_dim))
        self.assertEqual(output["image_embeds"].shape , (pixel_values.shape[0], model.config.projection_dim))
    def check_save_load(self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs):
        vision_model, text_model = self.get_vision_text_model(vision_config , text_config)
        kwargs = {"vision_model": vision_model, "text_model": text_model}
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)
        output = model(input_ids=input_ids , pixel_values=pixel_values , attention_mask=attention_mask)
        out_1 = output[0]
        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)
            model = FlaxVisionTextDualEncoderModel.from_pretrained(tmpdirname)
            after_output = model(input_ids=input_ids , pixel_values=pixel_values , attention_mask=attention_mask)
            out_2 = after_output[0]
            max_diff = np.amax(np.abs(out_2 - out_1))
            self.assertLessEqual(max_diff , 1e-3)
    def check_vision_text_output_attention(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config , text_config)
        kwargs = {"vision_model": vision_model, "text_model": text_model}
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)
        output = model(
            input_ids=input_ids , pixel_values=pixel_values , attention_mask=attention_mask , output_attentions=True)
        vision_attentions = output.vision_model_output.attentions
        self.assertEqual(len(vision_attentions) , vision_config.num_hidden_layers)
        # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
        image_size = to_atuple(vision_model.config.image_size)
        patch_size = to_atuple(vision_model.config.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 1
        self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len))
        text_attentions = output.text_model_output.attentions
        self.assertEqual(len(text_attentions) , text_config.num_hidden_layers)
        self.assertEqual(
            text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
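    # For intuition: a 224x224 image with 16x16 patches yields (224 // 16) * (224 // 16) + 1 = 197
    # attention positions per layer (the "+ 1" is the [CLS] token).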
    def check_pt_flax_equivalence(self, pt_model, fx_model, inputs_dict):
        pt_model.to(torch_device)
        pt_model.eval()
        # prepare inputs
        flax_inputs = inputs_dict
        pt_inputs = {k: torch.tensor(v.tolist()) for k, v in flax_inputs.items()}
        with torch.no_grad():
            pt_outputs = pt_model(**pt_inputs).to_tuple()
        fx_outputs = fx_model(**inputs_dict).to_tuple()
        self.assertEqual(len(fx_outputs) , len(pt_outputs) , "Output lengths differ between Flax and PyTorch")
        for fx_output, pt_output in zip(fx_outputs[:4] , pt_outputs[:4]):
            self.assert_almost_equals(fx_output , pt_output.numpy() , 4e-2)
        # PT -> Flax
        with tempfile.TemporaryDirectory() as tmpdirname:
            pt_model.save_pretrained(tmpdirname)
            fx_model_loaded = FlaxVisionTextDualEncoderModel.from_pretrained(tmpdirname , from_pt=True)
        fx_outputs_loaded = fx_model_loaded(**inputs_dict).to_tuple()
        self.assertEqual(len(fx_outputs_loaded) , len(pt_outputs) , "Output lengths differ between Flax and PyTorch")
        for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4] , pt_outputs[:4]):
            self.assert_almost_equals(fx_output_loaded , pt_output.numpy() , 4e-2)
        # Flax -> PT
        with tempfile.TemporaryDirectory() as tmpdirname:
            fx_model.save_pretrained(tmpdirname)
            pt_model_loaded = VisionTextDualEncoderModel.from_pretrained(tmpdirname , from_flax=True)
        pt_model_loaded.to(torch_device)
        pt_model_loaded.eval()
        with torch.no_grad():
            pt_outputs_loaded = pt_model_loaded(**pt_inputs).to_tuple()
        self.assertEqual(len(fx_outputs) , len(pt_outputs_loaded) , "Output lengths differ between Flax and PyTorch")
        for fx_output, pt_output_loaded in zip(fx_outputs[:4] , pt_outputs_loaded[:4]):
            self.assert_almost_equals(fx_output , pt_output_loaded.numpy() , 4e-2)
    def check_equivalence_pt_to_flax(self, vision_config, text_config, inputs_dict):
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config , text_config)
        pt_model = VisionTextDualEncoderModel(config)
        fx_model = FlaxVisionTextDualEncoderModel(config)
        fx_state = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , fx_model)
        fx_model.params = fx_state
        self.check_pt_flax_equivalence(pt_model , fx_model , inputs_dict)
    def check_equivalence_flax_to_pt(self, vision_config, text_config, inputs_dict):
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config , text_config)
        pt_model = VisionTextDualEncoderModel(config)
        fx_model = FlaxVisionTextDualEncoderModel(config)
        pt_model = load_flax_weights_in_pytorch_model(pt_model , fx_model.params)
        self.check_pt_flax_equivalence(pt_model , fx_model , inputs_dict)
    def test_model_from_pretrained_configs(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_model_from_pretrained_configs(**inputs_dict)
    def test_vision_text_dual_encoder_from_pretrained(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_from_pretrained(**inputs_dict)
    def test_save_load(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_save_load(**inputs_dict)
    def test_vision_text_output_attention(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_output_attention(**inputs_dict)
    @is_pt_flax_cross_test
    def test_pt_flax_equivalence(self):
        config_inputs_dict = self.prepare_config_and_inputs()
        vision_config = config_inputs_dict.pop("vision_config")
        text_config = config_inputs_dict.pop("text_config")
        inputs_dict = config_inputs_dict
        self.check_equivalence_pt_to_flax(vision_config , text_config , inputs_dict)
        self.check_equivalence_flax_to_pt(vision_config , text_config , inputs_dict)
    @slow
    def test_real_model_save_load_from_pretrained(self):
        model_2, inputs = self.get_pretrained_model_and_inputs()
        outputs = model_2(**inputs)
        out_2 = outputs[0]
        with tempfile.TemporaryDirectory() as tmp_dirname:
            model_2.save_pretrained(tmp_dirname)
            model_1 = FlaxVisionTextDualEncoderModel.from_pretrained(tmp_dirname)
            after_outputs = model_1(**inputs)
            out_1 = after_outputs[0]
            max_diff = np.amax(np.abs(out_1 - out_2))
            self.assertLessEqual(max_diff , 1e-5)
@require_flax
class FlaxViTBertModelTest( VisionTextDualEncoderMixin , unittest.TestCase ):
    def get_pretrained_model_and_inputs(self):
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
            "hf-internal-testing/tiny-random-vit" , "hf-internal-testing/tiny-bert" , vision_from_pt=True , text_from_pt=True , )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.config.vision_config.num_channels,
                model.config.vision_config.image_size,
                model.config.vision_config.image_size,
            ])
        input_ids = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
        return model, inputs
    def get_vision_text_model(self, vision_config, text_config):
        vision_model = FlaxViTModel(vision_config)
        text_model = FlaxBertModel(text_config)
        return vision_model, text_model
    def prepare_config_and_inputs(self):
        vit_model_tester = FlaxViTModelTester(self)
        bert_model_tester = FlaxBertModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
        vision_config, pixel_values = vision_config_and_inputs
        text_config, input_ids, token_type_ids, attention_mask = text_config_and_inputs
        # make sure that cross attention layers are added
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": attention_mask,
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
        }
@require_torch
class FlaxCLIPVisionBertModelTest( VisionTextDualEncoderMixin , unittest.TestCase ):
    def get_pretrained_model_and_inputs(self):
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
            "hf-internal-testing/tiny-random-clip" , "hf-internal-testing/tiny-bert" , vision_from_pt=True , text_from_pt=True , )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.config.vision_config.num_channels,
                model.config.vision_config.image_size,
                model.config.vision_config.image_size,
            ])
        input_ids = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
        return model, inputs
    def get_vision_text_model(self, vision_config, text_config):
        vision_model = FlaxCLIPVisionModel(vision_config)
        text_model = FlaxBertModel(text_config)
        return vision_model, text_model
    def prepare_config_and_inputs(self):
        clip_model_tester = FlaxCLIPVisionModelTester(self)
        bert_model_tester = FlaxBertModelTester(self)
        vision_config_and_inputs = clip_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
        vision_config, pixel_values = vision_config_and_inputs
        text_config, input_ids, token_type_ids, attention_mask = text_config_and_inputs
        # make sure that cross attention layers are added
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": attention_mask,
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
        }
@require_flax
@require_vision
class FlaxVisionTextDualEncoderIntegrationTest( unittest.TestCase ):
    @slow
    def test_inference(self):
        model = FlaxVisionTextDualEncoderModel.from_pretrained("clip-italian/clip-italian" , logit_scale_init_value=1.0)
        processor = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian")
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = processor(
            text=["una foto di un gatto", "una foto di un cane"] , images=image , padding=True , return_tensors="np")
        outputs = model(**inputs)
        # verify the logits
        self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]))
        self.assertEqual(
            outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , )
        expected_logits = np.array([[1.228_4727, 0.310_4122]])
        self.assertTrue(np.allclose(outputs.logits_per_image , expected_logits , atol=1e-3))
| 269 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {"configuration_glpn": ["GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP", "GLPNConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_glpn"] = ["GLPNFeatureExtractor"]
    _import_structure["image_processing_glpn"] = ["GLPNImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_glpn"] = [
"GLPN_PRETRAINED_MODEL_ARCHIVE_LIST",
"GLPNForDepthEstimation",
"GLPNLayer",
"GLPNModel",
"GLPNPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_glpn import GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP, GLPNConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_glpn import GLPNFeatureExtractor
from .image_processing_glpn import GLPNImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_glpn import (
GLPN_PRETRAINED_MODEL_ARCHIVE_LIST,
GLPNForDepthEstimation,
GLPNLayer,
GLPNModel,
GLPNPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
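# The _LazyModule indirection defers the heavy torch/vision imports until an attribute
# such as GLPNModel is first accessed, which keeps `import transformers` itself cheap.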
| 365 |
def UpperCamelCase ( number ):
    """
    Count the set bits (1s) in the binary representation of a non-negative integer,
    using Brian Kernighan's algorithm.
    """
    if not isinstance(number, int ) or number < 0:
        raise ValueError("""Input must be a non-negative integer""" )
    count = 0
while number:
# This way we arrive at next set bit (next 1) instead of looping
# through each bit and checking for 1s hence the
# loop won't run 32 times it will only run the number of `1` times
number &= number - 1
count += 1
return count
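# Worked example: 25 == 0b11001 has three set bits. Each `number &= number - 1` clears the
# lowest set bit (25 -> 24 -> 16 -> 0), so the loop runs exactly 3 times and returns 3.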
if __name__ == "__main__":
import doctest
doctest.testmod()
| 138 | 0 |
"""simple docstring"""
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class A_ (SchedulerCommonTest ):
    scheduler_classes = (DDIMParallelScheduler,)
    forward_default_kwargs = (("""eta""", 0.0), ("""num_inference_steps""", 50))
    def get_scheduler_config( self , **lowercase_ ):
        config = {
"num_train_timesteps": 1000,
"beta_start": 0.00_01,
"beta_end": 0.02,
"beta_schedule": "linear",
"clip_sample": True,
}
config.update(**lowercase_ )
return config
    def full_loop( self , **config_kwargs ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config_kwargs )
        scheduler = scheduler_class(**scheduler_config )
        num_inference_steps , eta = 10, 0.0
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps )
        for t in scheduler.timesteps:
            residual = model(sample , t )
            sample = scheduler.step(residual , t , sample , eta ).prev_sample
        return sample
    def test_timesteps( self ):
        for timesteps in [100, 500, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps )
    def test_steps_offset( self ):
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset )
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1 )
        scheduler = scheduler_class(**scheduler_config )
        scheduler.set_timesteps(5 )
        assert torch.equal(scheduler.timesteps , torch.LongTensor([801, 601, 401, 201, 1] ) )
    def test_betas( self ):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ):
            self.check_over_configs(beta_start=beta_start , beta_end=beta_end )
    def test_schedules( self ):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule )
    def test_prediction_type( self ):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type )
    def test_clip_sample( self ):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample )
    def test_timestep_spacing( self ):
        for timestep_spacing in ["trailing", "leading"]:
            self.check_over_configs(timestep_spacing=timestep_spacing )
    def test_rescale_betas_zero_snr( self ):
        for rescale_betas_zero_snr in [True, False]:
            self.check_over_configs(rescale_betas_zero_snr=rescale_betas_zero_snr )
    def test_thresholding( self ):
        self.check_over_configs(thresholding=False )
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True , prediction_type=prediction_type , sample_max_value=threshold , )
    def test_time_indices( self ):
        for t in [1, 10, 49]:
            self.check_over_forward(time_step=t )
    def test_inference_steps( self ):
        for t, num_inference_steps in zip([1, 10, 50] , [10, 50, 500] ):
            self.check_over_forward(time_step=t , num_inference_steps=num_inference_steps )
    def test_eta( self ):
        for t, eta in zip([1, 10, 49] , [0.0, 0.5, 1.0] ):
            self.check_over_forward(time_step=t , eta=eta )
    def test_variance( self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1E-5
        assert torch.sum(torch.abs(scheduler._get_variance(420 , 400 ) - 0.1_47_71 ) ) < 1E-5
        assert torch.sum(torch.abs(scheduler._get_variance(980 , 960 ) - 0.3_24_60 ) ) < 1E-5
        assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1E-5
        assert torch.sum(torch.abs(scheduler._get_variance(487 , 486 ) - 0.0_09_79 ) ) < 1E-5
        assert torch.sum(torch.abs(scheduler._get_variance(999 , 998 ) - 0.02 ) ) < 1E-5
    def test_batch_step_no_noise( self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        num_inference_steps , eta = 10, 0.0
        scheduler.set_timesteps(num_inference_steps )
        model = self.dummy_model()
        sample1 = self.dummy_sample_deter
        sample2 = self.dummy_sample_deter + 0.1
        sample3 = self.dummy_sample_deter - 0.1
        per_sample_batch = sample1.shape[0]
        samples = torch.stack([sample1, sample2, sample3] , dim=0 )
        timesteps = torch.arange(num_inference_steps )[0:3, None].repeat(1 , per_sample_batch )
        residual = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) )
        pred_prev_sample = scheduler.batch_step_no_noise(residual , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) , eta )
        result_sum = torch.sum(torch.abs(pred_prev_sample ) )
        result_mean = torch.mean(torch.abs(pred_prev_sample ) )
        assert abs(result_sum.item() - 11_47.79_04 ) < 1E-2
        assert abs(result_mean.item() - 0.49_82 ) < 1E-3
    def test_full_loop_no_noise( self ):
        sample = self.full_loop()
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_sum.item() - 1_72.00_67 ) < 1E-2
        assert abs(result_mean.item() - 0.22_39_67 ) < 1E-3
    def test_full_loop_with_v_prediction( self ):
        sample = self.full_loop(prediction_type="v_prediction" )
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_sum.item() - 52.53_02 ) < 1E-2
        assert abs(result_mean.item() - 0.06_84 ) < 1E-3
    def test_full_loop_with_set_alpha_to_one( self ):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=True , beta_start=0.01 )
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_sum.item() - 1_49.82_95 ) < 1E-2
        assert abs(result_mean.item() - 0.19_51 ) < 1E-3
    def test_full_loop_with_no_set_alpha_to_one( self ):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=False , beta_start=0.01 )
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_sum.item() - 1_49.07_84 ) < 1E-2
        assert abs(result_mean.item() - 0.19_41 ) < 1E-3
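# Note: batch_step_no_noise advances several (sample, timestep) pairs in a single call,
# which is the primitive that parallel (ParaDiGMS-style) samplers build on; the test above
# flattens a stack of three samples into one batch before stepping.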
| 61 |
"""simple docstring"""
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class A_ (SchedulerCommonTest ):
    scheduler_classes = (DDIMParallelScheduler,)
    forward_default_kwargs = (("""eta""", 0.0), ("""num_inference_steps""", 50))
    def get_scheduler_config( self , **lowercase_ ):
        config = {
"num_train_timesteps": 1000,
"beta_start": 0.00_01,
"beta_end": 0.02,
"beta_schedule": "linear",
"clip_sample": True,
}
config.update(**lowercase_ )
return config
    def full_loop( self , **config_kwargs ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config_kwargs )
        scheduler = scheduler_class(**scheduler_config )
        num_inference_steps , eta = 10, 0.0
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps )
        for t in scheduler.timesteps:
            residual = model(sample , t )
            sample = scheduler.step(residual , t , sample , eta ).prev_sample
        return sample
    def test_timesteps( self ):
        for timesteps in [100, 500, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps )
    def test_steps_offset( self ):
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset )
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1 )
        scheduler = scheduler_class(**scheduler_config )
        scheduler.set_timesteps(5 )
        assert torch.equal(scheduler.timesteps , torch.LongTensor([801, 601, 401, 201, 1] ) )
    def test_betas( self ):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ):
            self.check_over_configs(beta_start=beta_start , beta_end=beta_end )
    def test_schedules( self ):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule )
    def test_prediction_type( self ):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type )
    def test_clip_sample( self ):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample )
    def test_timestep_spacing( self ):
        for timestep_spacing in ["trailing", "leading"]:
            self.check_over_configs(timestep_spacing=timestep_spacing )
    def test_rescale_betas_zero_snr( self ):
        for rescale_betas_zero_snr in [True, False]:
            self.check_over_configs(rescale_betas_zero_snr=rescale_betas_zero_snr )
    def test_thresholding( self ):
        self.check_over_configs(thresholding=False )
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True , prediction_type=prediction_type , sample_max_value=threshold , )
    def test_time_indices( self ):
        for t in [1, 10, 49]:
            self.check_over_forward(time_step=t )
    def test_inference_steps( self ):
        for t, num_inference_steps in zip([1, 10, 50] , [10, 50, 500] ):
            self.check_over_forward(time_step=t , num_inference_steps=num_inference_steps )
    def test_eta( self ):
        for t, eta in zip([1, 10, 49] , [0.0, 0.5, 1.0] ):
            self.check_over_forward(time_step=t , eta=eta )
    def test_variance( self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1E-5
        assert torch.sum(torch.abs(scheduler._get_variance(420 , 400 ) - 0.1_47_71 ) ) < 1E-5
        assert torch.sum(torch.abs(scheduler._get_variance(980 , 960 ) - 0.3_24_60 ) ) < 1E-5
        assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1E-5
        assert torch.sum(torch.abs(scheduler._get_variance(487 , 486 ) - 0.0_09_79 ) ) < 1E-5
        assert torch.sum(torch.abs(scheduler._get_variance(999 , 998 ) - 0.02 ) ) < 1E-5
    def test_batch_step_no_noise( self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        num_inference_steps , eta = 10, 0.0
        scheduler.set_timesteps(num_inference_steps )
        model = self.dummy_model()
        sample1 = self.dummy_sample_deter
        sample2 = self.dummy_sample_deter + 0.1
        sample3 = self.dummy_sample_deter - 0.1
        per_sample_batch = sample1.shape[0]
        samples = torch.stack([sample1, sample2, sample3] , dim=0 )
        timesteps = torch.arange(num_inference_steps )[0:3, None].repeat(1 , per_sample_batch )
        residual = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) )
        pred_prev_sample = scheduler.batch_step_no_noise(residual , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) , eta )
        result_sum = torch.sum(torch.abs(pred_prev_sample ) )
        result_mean = torch.mean(torch.abs(pred_prev_sample ) )
        assert abs(result_sum.item() - 11_47.79_04 ) < 1E-2
        assert abs(result_mean.item() - 0.49_82 ) < 1E-3
    def test_full_loop_no_noise( self ):
        sample = self.full_loop()
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_sum.item() - 1_72.00_67 ) < 1E-2
        assert abs(result_mean.item() - 0.22_39_67 ) < 1E-3
    def test_full_loop_with_v_prediction( self ):
        sample = self.full_loop(prediction_type="v_prediction" )
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_sum.item() - 52.53_02 ) < 1E-2
        assert abs(result_mean.item() - 0.06_84 ) < 1E-3
    def test_full_loop_with_set_alpha_to_one( self ):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=True , beta_start=0.01 )
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_sum.item() - 1_49.82_95 ) < 1E-2
        assert abs(result_mean.item() - 0.19_51 ) < 1E-3
    def test_full_loop_with_no_set_alpha_to_one( self ):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=False , beta_start=0.01 )
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_sum.item() - 1_49.07_84 ) < 1E-2
        assert abs(result_mean.item() - 0.19_41 ) < 1E-3
| 61 | 1 |
"""simple docstring"""
from abc import ABC, abstractmethod
from typing import List, Optional
class Constraint(ABC):
    def __init__(self):
        # test for the above condition
        self.test()
    def test(self):
        counter = 0
        completed = False
        while not completed:
            if counter == 1:
                self.reset()
            advance = self.advance()
            if not self.does_advance(advance):
                raise Exception(
                    '''Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true.''' )
            stepped, completed, reset = self.update(advance)
            counter += 1
            if counter > 10_000:
                raise Exception('''update() does not fulfill the constraint.''' )
        if self.remaining() != 0:
            raise Exception('''Custom Constraint is not defined correctly.''' )
    @abstractmethod
    def advance(self):
        raise NotImplementedError(
            F'{self.__class__} is an abstract class. Only classes inheriting this class can be called.' )
    @abstractmethod
    def does_advance(self, token_id: int):
        raise NotImplementedError(
            F'{self.__class__} is an abstract class. Only classes inheriting this class can be called.' )
    @abstractmethod
    def update(self, token_id: int):
        raise NotImplementedError(
            F'{self.__class__} is an abstract class. Only classes inheriting this class can be called.' )
    @abstractmethod
    def reset(self):
        raise NotImplementedError(
            F'{self.__class__} is an abstract class. Only classes inheriting this class can be called.' )
    @abstractmethod
    def remaining(self):
        raise NotImplementedError(
            F'{self.__class__} is an abstract class. Only classes inheriting this class can be called.' )
    @abstractmethod
    def copy(self, stateful=False):
        raise NotImplementedError(
            F'{self.__class__} is an abstract class. Only classes inheriting this class can be called.' )
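# Contract assumed throughout this file: update() returns a (stepped, completed, reset)
# triple -- whether the token advanced the constraint, whether the constraint is now fully
# satisfied, and whether progress had to be thrown away.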
class PhrasalConstraint( Constraint ):
    def __init__(self, token_ids: List[int]):
        super(Constraint, self).__init__()
        if not isinstance(token_ids, list) or len(token_ids) == 0:
            raise ValueError(F'`token_ids` has to be a non-empty list, but is {token_ids}.' )
        if any((not isinstance(token_id, int) or token_id < 0) for token_id in token_ids):
            raise ValueError(F'Each list in `token_ids` has to be a list of positive integers, but is {token_ids}.' )
        self.token_ids = token_ids
        self.seqlen = len(self.token_ids)
        self.fulfilled_idx = -1  # the index of the currently fulfilled step
        self.completed = False
    def advance(self):
        if self.completed:
            return None
        return self.token_ids[self.fulfilled_idx + 1]
    def does_advance(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(F'`token_id` has to be an `int`, but is {token_id} of type {type(token_id )}' )
        if self.completed:
            return False
        return token_id == self.token_ids[self.fulfilled_idx + 1]
    def update(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(F'`token_id` has to be an `int`, but is {token_id} of type {type(token_id )}' )
        stepped = False
        completed = False
        reset = False
        if self.does_advance(token_id):
            self.fulfilled_idx += 1
            stepped = True
            if self.fulfilled_idx == (self.seqlen - 1):
                completed = True
            self.completed = completed
        else:
            # failed to make progress.
            reset = True
            self.reset()
        return stepped, completed, reset
    def reset(self):
        self.completed = False
        self.fulfilled_idx = 0
    def remaining(self):
        return self.seqlen - (self.fulfilled_idx + 1)
    def copy(self, stateful=False):
        new_constraint = PhrasalConstraint(self.token_ids )
        if stateful:
            new_constraint.seq_len = self.seqlen
            new_constraint.fulfilled_idx = self.fulfilled_idx
            new_constraint.completed = self.completed
        return new_constraint
class DisjunctiveTrie:
    def __init__(self, nested_token_ids: List[List[int]], no_subsets=True):
        self.max_height = max([len(one) for one in nested_token_ids] )
        root = {}
        for token_ids in nested_token_ids:
            level = root
            for tidx, token_id in enumerate(token_ids ):
                if token_id not in level:
                    level[token_id] = {}
                level = level[token_id]
        if no_subsets and self.has_subsets(root , nested_token_ids ):
            raise ValueError(
                '''Each list in `nested_token_ids` can\'t be a complete subset of another list, but is'''
                F' {nested_token_ids}.' )
        self.trie = root
    def next_tokens(self, current_seq):
        start = self.trie
        for current_token in current_seq:
            start = start[current_token]
        next_tokens = list(start.keys() )
        return next_tokens
    def reached_leaf(self, current_seq):
        next_tokens = self.next_tokens(current_seq )
        return len(next_tokens ) == 0
    def count_leaves(self, root):
        next_nodes = list(root.values() )
        if len(next_nodes ) == 0:
            return 1
        else:
            return sum([self.count_leaves(nn ) for nn in next_nodes] )
    def has_subsets(self, trie, nested_token_ids):
        leaf_count = self.count_leaves(trie )
        return len(nested_token_ids ) != leaf_count
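# For example, DisjunctiveTrie([[1, 2, 3], [1, 2, 4]]).next_tokens([1, 2]) returns [3, 4]:
# the two branches share the prefix [1, 2] and then diverge at the leaves.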
class DisjunctiveConstraint( Constraint ):
    def __init__(self, nested_token_ids: List[List[int]]):
        super(Constraint, self).__init__()
        if not isinstance(nested_token_ids, list) or len(nested_token_ids ) == 0:
            raise ValueError(F'`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.' )
        if any(not isinstance(token_ids, list) for token_ids in nested_token_ids ):
            raise ValueError(F'`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.' )
        if any(
            any((not isinstance(token_id, int) or token_id < 0) for token_id in token_ids )
            for token_ids in nested_token_ids ):
            raise ValueError(
                F'Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}.' )
        self.trie = DisjunctiveTrie(nested_token_ids )
        self.token_ids = nested_token_ids
        self.seqlen = self.trie.max_height
        self.current_seq = []
        self.completed = False
    def advance(self):
        token_list = self.trie.next_tokens(self.current_seq )
        if len(token_list ) == 0:
            return None
        else:
            return token_list
    def does_advance(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(F'`token_id` is supposed to be type `int`, but is {token_id} of type {type(token_id )}' )
        next_tokens = self.trie.next_tokens(self.current_seq )
        return token_id in next_tokens
    def update(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(F'`token_id` is supposed to be type `int`, but is {token_id} of type {type(token_id )}' )
        stepped = False
        completed = False
        reset = False
        if self.does_advance(token_id ):
            self.current_seq.append(token_id )
            stepped = True
        else:
            reset = True
            self.reset()
        completed = self.trie.reached_leaf(self.current_seq )
        self.completed = completed
        return stepped, completed, reset
    def reset(self):
        self.completed = False
        self.current_seq = []
    def remaining(self):
        if self.completed:
            # since this can be completed without reaching max height
            return 0
        else:
            return self.seqlen - len(self.current_seq )
    def copy(self, stateful=False):
        new_constraint = DisjunctiveConstraint(self.token_ids )
        if stateful:
            new_constraint.seq_len = self.seqlen
            new_constraint.current_seq = self.current_seq
            new_constraint.completed = self.completed
        return new_constraint
class ConstraintListState:
    def __init__(self, constraints: List[Constraint]):
        self.constraints = constraints
        # max # of steps required to fulfill a given constraint
        self.max_seqlen = max([c.seqlen for c in constraints] )
        self.n_constraints = len(constraints )
        self.completed = False
        self.init_state()
    def init_state(self):
        self.complete_constraints = []
        self.inprogress_constraint = None
        self.pending_constraints = [constraint.copy(stateful=False ) for constraint in self.constraints]
    def get_bank(self):
        add = 0
        if self.inprogress_constraint:
            # extra points for having a constraint mid-fulfilled
            add += self.max_seqlen - self.inprogress_constraint.remaining()
        return (len(self.complete_constraints ) * self.max_seqlen) + add
    def advance(self):
        token_list = []
        if self.inprogress_constraint is None:
            for constraint in self.pending_constraints: # "pending" == "unfulfilled yet"
                advance = constraint.advance()
                if isinstance(advance, int ):
                    token_list.append(advance )
                elif isinstance(advance, list ):
                    token_list.extend(advance )
        else:
            advance = self.inprogress_constraint.advance()
            if isinstance(advance, int ):
                token_list.append(advance )
            elif isinstance(advance, list ):
                token_list.extend(advance )
        if len(token_list ) == 0:
            return None
        else:
            return token_list
    def reset(self, token_ids: Optional[List[int]]):
        self.init_state()
        if token_ids is not None:
            for token in token_ids:
                # completes or steps **one** constraint
                complete, stepped = self.add(token )
                # the entire list of constraints are fulfilled
                if self.completed:
                    break
    def add(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(F'`token_id` should be an `int`, but is `{token_id}`.' )
        complete, stepped = False, False
        if self.completed:
            complete = True
            stepped = False
            return complete, stepped
        if self.inprogress_constraint is not None:
            # In the middle of fulfilling a constraint. If the `token_id` *does* make incremental progress on the
            # current job, simply update the state
            stepped, complete, reset = self.inprogress_constraint.update(token_id )
            if reset:
                # 1. If the next token breaks the progress, then we must restart.
                # e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books".
                # But that doesn't mean we self.init_state(), since we only reset the state for this particular
                # constraint, not the full list of constraints.
                self.pending_constraints.append(self.inprogress_constraint.copy(stateful=False ) )
                self.inprogress_constraint = None
            if complete:
                # 2. If the next token completes the constraint, move it to completed list, set
                # inprogress to None. If there are no pending constraints either, then this full list of constraints
                # is complete.
                self.complete_constraints.append(self.inprogress_constraint )
                self.inprogress_constraint = None
                if len(self.pending_constraints ) == 0:
                    # we're done!
                    self.completed = True
        else:
            # Not in the middle of fulfilling a constraint. So does this `token_id` help us step towards any of our list
            # of constraints?
            for cidx, pending_constraint in enumerate(self.pending_constraints ):
                if pending_constraint.does_advance(token_id ):
                    stepped, complete, reset = pending_constraint.update(token_id )
                    if not stepped:
                        raise Exception(
                            '''`constraint.update(token_id)` is not yielding incremental progress, '''
                            '''even though `constraint.does_advance(token_id)` is true.''' )
                    if complete:
                        self.complete_constraints.append(pending_constraint )
                        self.inprogress_constraint = None
                    if not complete and stepped:
                        self.inprogress_constraint = pending_constraint
                    if complete or stepped:
                        # If we made any progress at all, then it's at least not a "pending constraint".
                        self.pending_constraints = (
                            self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :]
                        )
                        if len(self.pending_constraints ) == 0 and self.inprogress_constraint is None:
                            # If there's no longer any pending after this and no inprogress either, then we must be
                            # complete.
                            self.completed = True
                        break # prevent accidentally stepping through multiple constraints with just one token.
        return complete, stepped
    def copy(self, stateful=True):
        new_state = ConstraintListState(self.constraints ) # we never mutate the `self.constraints`
        # objects themselves, so they stay in their initialization state.
        if stateful:
            new_state.complete_constraints = [
                constraint.copy(stateful=True ) for constraint in self.complete_constraints
            ]
            if self.inprogress_constraint is not None:
                new_state.inprogress_constraint = self.inprogress_constraint.copy(stateful=True )
            new_state.pending_constraints = [constraint.copy() for constraint in self.pending_constraints]
        return new_state
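# Minimal usage sketch (token ids are made up for illustration):
#   constraint = PhrasalConstraint([5, 9, 3])   # force generating 5 -> 9 -> 3 in order
#   state = ConstraintListState([constraint])
#   for token in (5, 9, 3):
#       state.add(token)
#   assert state.completed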
| 370 |
"""simple docstring"""
import os
from collections import namedtuple
import pytest
from datasets import ClassLabel, Features, Sequence, Value
from datasets.commands.test import TestCommand
from datasets.info import DatasetInfo, DatasetInfosDict
__UpperCamelCase : Union[str, Any] = namedtuple(
'''_TestCommandArgs''',
[
'''dataset''',
'''name''',
'''cache_dir''',
'''data_dir''',
'''all_configs''',
'''save_infos''',
'''ignore_verifications''',
'''force_redownload''',
'''clear_cache''',
],
defaults=[None, None, None, False, False, False, False, False],
)
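# The `defaults` tuple is right-aligned against the field list, so `dataset` has no default
# while everything from `name` through `clear_cache` defaults to None/False respectively.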
def __SCREAMING_SNAKE_CASE ( A_ , A_ ):
return (abs(source - target ) / target) < 0.01
@pytest.mark.integration
def test_test_command(dataset_loading_script_dir):
    # `dataset_loading_script_dir` is assumed to be a pytest fixture pointing at a local dataset script
    args = _TestCommandArgs(dataset=dataset_loading_script_dir, all_configs=True, save_infos=True)
    test_command = TestCommand(*args)
    test_command.run()
    datasets_readme_path = os.path.join(dataset_loading_script_dir, "README.md")
    assert os.path.exists(datasets_readme_path)
    dataset_infos = DatasetInfosDict.from_directory(dataset_loading_script_dir)
    expected_dataset_infos = DatasetInfosDict(
        {
            "default": DatasetInfo(
                features=Features(
                    {
                        "tokens": Sequence(Value("string")),
                        "ner_tags": Sequence(
                            ClassLabel(names=["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"])
                        ),
                        "langs": Sequence(Value("string")),
                        "spans": Sequence(Value("string")),
                    }
                ),
                splits=[
                    {
                        "name": "train",
                        "num_bytes": 2_351_563,
                        "num_examples": 10_000,
                    },
                    {
                        "name": "validation",
                        "num_bytes": 238_418,
                        "num_examples": 1_000,
                    },
                ],
                download_size=3_940_680,
                dataset_size=2_589_981,
            )
        }
    )
    assert dataset_infos.keys() == expected_dataset_infos.keys()
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        result, expected = getattr(dataset_infos["default"], key), getattr(expected_dataset_infos["default"], key)
        if key == "num_bytes":
            assert is_apercent_close(result, expected)
        elif key == "splits":
            assert list(result) == list(expected)
            for split in result:
                assert result[split].name == expected[split].name
                assert result[split].num_examples == expected[split].num_examples
                assert is_apercent_close(result[split].num_bytes, expected[split].num_bytes)
        else:
            assert result == expected
| 74 | 0 |
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation
def get_upernet_config(model_name):
    auxiliary_in_channels = 384
    window_size = 7
    if "tiny" in model_name:
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif "small" in model_name:
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif "base" in model_name:
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
        window_size = 12
        auxiliary_in_channels = 512
    elif "large" in model_name:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)
        window_size = 12
        auxiliary_in_channels = 768

    # set label information
    num_labels = 150
    repo_id = "huggingface/label-files"
    filename = "ade20k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    backbone_config = SwinConfig(
        embed_dim=embed_dim,
        depths=depths,
        num_heads=num_heads,
        window_size=window_size,
        out_features=["stage1", "stage2", "stage3", "stage4"],
    )
    config = UperNetConfig(
        backbone_config=backbone_config,
        auxiliary_in_channels=auxiliary_in_channels,
        num_labels=num_labels,
        id2label=id2label,
        label2id=label2id,
    )

    return config
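# For example, get_upernet_config("upernet-swin-tiny") returns a UperNetConfig whose Swin
# backbone uses embed_dim=96, depths=(2, 2, 6, 2) and num_heads=(3, 6, 12, 24), with 150 ADE20k labels.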
def create_rename_keys(config):
    rename_keys = []
# fmt: off
# stem
rename_keys.append(('backbone.patch_embed.projection.weight', 'backbone.embeddings.patch_embeddings.projection.weight') )
rename_keys.append(('backbone.patch_embed.projection.bias', 'backbone.embeddings.patch_embeddings.projection.bias') )
rename_keys.append(('backbone.patch_embed.norm.weight', 'backbone.embeddings.norm.weight') )
rename_keys.append(('backbone.patch_embed.norm.bias', 'backbone.embeddings.norm.bias') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F'backbone.stages.{i}.blocks.{j}.norm1.weight', F'backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight') )
rename_keys.append((F'backbone.stages.{i}.blocks.{j}.norm1.bias', F'backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias') )
rename_keys.append((F'backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_bias_table', F'backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table') )
rename_keys.append((F'backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_index', F'backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index') )
rename_keys.append((F'backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.weight', F'backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight') )
rename_keys.append((F'backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.bias', F'backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias') )
rename_keys.append((F'backbone.stages.{i}.blocks.{j}.norm2.weight', F'backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight') )
rename_keys.append((F'backbone.stages.{i}.blocks.{j}.norm2.bias', F'backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias') )
rename_keys.append((F'backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.weight', F'backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight') )
rename_keys.append((F'backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.bias', F'backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias') )
rename_keys.append((F'backbone.stages.{i}.blocks.{j}.ffn.layers.1.weight', F'backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight') )
rename_keys.append((F'backbone.stages.{i}.blocks.{j}.ffn.layers.1.bias', F'backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias') )
if i < 3:
rename_keys.append((F'backbone.stages.{i}.downsample.reduction.weight', F'backbone.encoder.layers.{i}.downsample.reduction.weight') )
rename_keys.append((F'backbone.stages.{i}.downsample.norm.weight', F'backbone.encoder.layers.{i}.downsample.norm.weight') )
rename_keys.append((F'backbone.stages.{i}.downsample.norm.bias', F'backbone.encoder.layers.{i}.downsample.norm.bias') )
rename_keys.append((F'backbone.norm{i}.weight', F'backbone.hidden_states_norms.stage{i+1}.weight') )
rename_keys.append((F'backbone.norm{i}.bias', F'backbone.hidden_states_norms.stage{i+1}.bias') )
# decode head
rename_keys.extend(
[
('decode_head.conv_seg.weight', 'decode_head.classifier.weight'),
('decode_head.conv_seg.bias', 'decode_head.classifier.bias'),
('auxiliary_head.conv_seg.weight', 'auxiliary_head.classifier.weight'),
('auxiliary_head.conv_seg.bias', 'auxiliary_head.classifier.bias'),
] )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def read_in_q_k_v(state_dict, backbone_config):
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in the original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f"backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.weight")
            in_proj_bias = state_dict.pop(f"backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.bias")
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"] = in_proj_weight[:dim, :]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"] = in_proj_bias[:dim]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"] = in_proj_weight[dim : dim * 2, :]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"] = in_proj_bias[dim : dim * 2]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"] = in_proj_weight[-dim:, :]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"] = in_proj_bias[-dim:]
            # fmt: on
def correct_unfold_reduction_order(x):
    out_channel, in_channel = x.shape
    x = x.reshape(out_channel, 4, in_channel // 4)
    x = x[:, [0, 2, 1, 3], :].transpose(1, 2).reshape(out_channel, in_channel)
    return x


def reverse_correct_unfold_reduction_order(x):
    out_channel, in_channel = x.shape
    x = x.reshape(out_channel, in_channel // 4, 4)
    x = x[:, :, [0, 2, 1, 3]].transpose(1, 2).reshape(out_channel, in_channel)
    return x


def correct_unfold_norm_order(x):
    in_channel = x.shape[0]
    x = x.reshape(4, in_channel // 4)
    x = x[[0, 2, 1, 3], :].transpose(0, 1).reshape(in_channel)
    return x


def reverse_correct_unfold_norm_order(x):
    in_channel = x.shape[0]
    x = x.reshape(in_channel // 4, 4)
    x = x[:, [0, 2, 1, 3]].transpose(0, 1).reshape(in_channel)
    return x
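# Sanity check (sketch): the "reverse_*" functions above undo their forward counterparts,
# since the permutation [0, 2, 1, 3] is its own inverse. For instance:
#     x = torch.randn(8, 16)
#     assert torch.equal(reverse_correct_unfold_reduction_order(correct_unfold_reduction_order(x)), x)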
def convert_upernet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    model_name_to_url = {
"upernet-swin-tiny": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth",
"upernet-swin-small": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth",
"upernet-swin-base": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth",
"upernet-swin-large": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth",
}
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu", file_name=model_name)[
        "state_dict"
    ]

    for name, param in state_dict.items():
        print(name, param.shape)

    config = get_upernet_config(model_name)
    model = UperNetForSemanticSegmentation(config)
    model.eval()

    # replace "bn" => "batch_norm"
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        if "bn" in key:
            key = key.replace("bn", "batch_norm")
        state_dict[key] = val

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    # the qkv projections need to be split into separate query, key and value matrices
    read_in_q_k_v(state_dict, config.backbone_config)

    # fix downsample parameters
    for key, value in state_dict.items():
        if "downsample" in key:
            if "reduction" in key:
                state_dict[key] = reverse_correct_unfold_reduction_order(value)
            if "norm" in key:
                state_dict[key] = reverse_correct_unfold_norm_order(value)

    model.load_state_dict(state_dict)

    # verify on image
    url = "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")

    processor = SegformerImageProcessor()
    pixel_values = processor(image, return_tensors="pt").pixel_values

    with torch.no_grad():
        outputs = model(pixel_values)
    logits = outputs.logits

    print(logits.shape)
    print("First values of logits:", logits[0, 0, :3, :3])
    # assert values
    if model_name == "upernet-swin-tiny":
        expected_slice = torch.tensor(
            [[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]]
        )
    elif model_name == "upernet-swin-small":
        expected_slice = torch.tensor(
            [[-7.1921, -7.1921, -6.9532], [-7.1921, -7.1921, -6.9532], [-7.0908, -7.0908, -6.8534]]
        )
    elif model_name == "upernet-swin-base":
        expected_slice = torch.tensor(
            [[-6.5851, -6.5851, -6.4330], [-6.5851, -6.5851, -6.4330], [-6.4763, -6.4763, -6.3254]]
        )
    elif model_name == "upernet-swin-large":
        expected_slice = torch.tensor(
            [[-7.5297, -7.5297, -7.3802], [-7.5297, -7.5297, -7.3802], [-7.4044, -7.4044, -7.2586]]
        )
    print("Logits:", outputs.logits[0, 0, :3, :3])
    assert torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor for {model_name} to hub")
        model.push_to_hub(f"openmmlab/{model_name}")
        processor.push_to_hub(f"openmmlab/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='upernet-swin-tiny',
type=str,
choices=[f"""upernet-swin-{size}""" for size in ['tiny', 'small', 'base', 'large']],
help='Name of the Swin + UperNet model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
    args = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 62 | """simple docstring"""
import numpy as np
from PIL import Image
def maxpooling(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    """
    Perform max pooling on a square 2D matrix (image).
    Args:
        arr: input 2D array
        size: size of the pooling window
        stride: number of pixels the window shifts over the input
    Returns:
        numpy array of the maxpooled matrix
    >>> maxpooling(np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]]), 2, 2)
    array([[ 6.,  8.],
           [14., 16.]])
    """
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError("The input array is not a square matrix")
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0

    # compute the shape of the output matrix
    maxpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape maxpool_shape
    updated_arr = np.zeros((maxpool_shape, maxpool_shape))

    while i < arr.shape[0]:
        if i + size > arr.shape[0]:
            # if the end of the matrix is reached, break
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the maximum of the pooling matrix
            updated_arr[mat_i][mat_j] = np.max(arr[i : i + size, j : j + size])
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1
        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0

    return updated_arr


def avgpooling(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    """
    Perform average pooling on a square 2D matrix (image).
    Args:
        arr: input 2D array
        size: size of the pooling window
        stride: number of pixels the window shifts over the input
    Returns:
        numpy array of the average-pooled matrix
    >>> avgpooling(np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]]), 2, 2)
    array([[ 3.,  5.],
           [11., 13.]])
    """
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError("The input array is not a square matrix")
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0

    # compute the shape of the output matrix
    avgpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape avgpool_shape
    updated_arr = np.zeros((avgpool_shape, avgpool_shape))

    while i < arr.shape[0]:
        # if the end of the matrix is reached, break
        if i + size > arr.shape[0]:
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the average of the pooling matrix
            updated_arr[mat_i][mat_j] = int(np.average(arr[i : i + size, j : j + size]))
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1
        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0

    return updated_arr
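# A vectorized alternative (hedged sketch, requires NumPy >= 1.20) that avoids the explicit loops:
#     from numpy.lib.stride_tricks import sliding_window_view
#     windows = sliding_window_view(arr, (size, size))[::stride, ::stride]
#     maxpooled = windows.max(axis=(2, 3))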
# Main Function
if __name__ == "__main__":
from doctest import testmod
testmod(name='''avgpooling''', verbose=True)
# Loading the image
    image = Image.open("path_to_image")
# Converting the image to numpy array and maxpooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show()
# Converting the image to numpy array and averagepooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show() | 135 | 0 |
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to properly calculate the metrics on the
# validation dataset when in a distributed system, and builds off the
# `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('''TESTING_MOCKED_DATALOADERS''', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        samples_seen = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather((predictions, batch["labels"]))
            # New Code #
            # First we check if it's a distributed system
            if accelerator.use_distributed:
                # Then see if we're on the last batch of our eval dataloader
                if step == len(eval_dataloader) - 1:
                    # Last batch needs to be truncated on distributed systems as it contains additional samples
                    predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                    references = references[: len(eval_dataloader.dataset) - samples_seen]
                else:
                    # Otherwise we add the number of samples seen
                    samples_seen += references.shape[0]
            # All of this can be avoided if you use `Accelerator.gather_for_metrics` instead of `Accelerator.gather`:
            # accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
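# Note (hedged sketch): on recent accelerate versions the manual truncation above can be replaced by
#     predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
# which drops the duplicated samples from the last distributed batch automatically.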
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose between fp16 and bf16 (bfloat16). "
        "Bf16 requires PyTorch >= 1.10 and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
| 351 |
import json
import os
import torch
from diffusers import UNet1DModel
os.makedirs('''hub/hopper-medium-v2/unet/hor32''', exist_ok=True)
os.makedirs('''hub/hopper-medium-v2/unet/hor128''', exist_ok=True)
os.makedirs('''hub/hopper-medium-v2/value_function''', exist_ok=True)
def unet(hor):
    if hor == 128:
        down_block_types = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D")
        block_out_channels = (32, 128, 256)
        up_block_types = ("UpResnetBlock1D", "UpResnetBlock1D")
    elif hor == 32:
        down_block_types = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D")
        block_out_channels = (32, 64, 128, 256)
        up_block_types = ("UpResnetBlock1D", "UpResnetBlock1D", "UpResnetBlock1D")
    model = torch.load(f"/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch")
    state_dict = model.state_dict()
    config = {
        "down_block_types": down_block_types,
        "block_out_channels": block_out_channels,
        "up_block_types": up_block_types,
        "layers_per_block": 1,
        "use_timestep_embedding": True,
        "out_block_type": "OutConv1DBlock",
        "norm_num_groups": 8,
        "downsample_each_block": False,
        "in_channels": 14,
        "out_channels": 14,
        "extra_in_channels": 0,
        "time_embedding_type": "positional",
        "flip_sin_to_cos": False,
        "freq_shift": 1,
        "sample_size": 65_536,
        "mid_block_type": "MidResTemporalBlock1D",
        "act_fn": "mish",
    }
    hf_value_function = UNet1DModel(**config)
    print(f"length of state dict: {len(state_dict.keys())}")
    print(f"length of value function dict: {len(hf_value_function.state_dict().keys())}")
    mapping = dict(zip(model.state_dict().keys(), hf_value_function.state_dict().keys()))
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k)
    hf_value_function.load_state_dict(state_dict)

    torch.save(hf_value_function.state_dict(), f"hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin")
    with open(f"hub/hopper-medium-v2/unet/hor{hor}/config.json", "w") as f:
        json.dump(config, f)
def value_function():
    config = {
        "in_channels": 14,
        "down_block_types": ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D"),
        "up_block_types": (),
        "out_block_type": "ValueFunction",
        "mid_block_type": "ValueFunctionMidBlock1D",
        "block_out_channels": (32, 64, 128, 256),
        "layers_per_block": 1,
        "downsample_each_block": True,
        "sample_size": 65_536,
        "out_channels": 14,
        "extra_in_channels": 0,
        "time_embedding_type": "positional",
        "use_timestep_embedding": True,
        "flip_sin_to_cos": False,
        "freq_shift": 1,
        "norm_num_groups": 8,
        "act_fn": "mish",
    }

    model = torch.load("/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch")
    state_dict = model
    hf_value_function = UNet1DModel(**config)
    print(f"length of state dict: {len(state_dict.keys())}")
    print(f"length of value function dict: {len(hf_value_function.state_dict().keys())}")

    mapping = dict(zip(state_dict.keys(), hf_value_function.state_dict().keys()))
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k)

    hf_value_function.load_state_dict(state_dict)

    torch.save(hf_value_function.state_dict(), "hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin")
    with open("hub/hopper-medium-v2/value_function/config.json", "w") as f:
        json.dump(config, f)
if __name__ == "__main__":
unet(32)
# unet(128)
value_function()
| 42 | 0 |
"""simple docstring"""
import inspect
import unittest
from transformers import DecisionTransformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import DecisionTransformerModel
from transformers.models.decision_transformer.modeling_decision_transformer import (
DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
class DecisionTransformerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        act_dim=6,
        state_dim=17,
        hidden_size=23,
        max_length=11,
        is_training=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.act_dim = act_dim
        self.state_dim = state_dim
        self.hidden_size = hidden_size
        self.max_length = max_length
        self.is_training = is_training

    def prepare_config_and_inputs(self):
        states = floats_tensor((self.batch_size, self.seq_length, self.state_dim))
        actions = floats_tensor((self.batch_size, self.seq_length, self.act_dim))
        rewards = floats_tensor((self.batch_size, self.seq_length, 1))
        returns_to_go = floats_tensor((self.batch_size, self.seq_length, 1))
        timesteps = ids_tensor((self.batch_size, self.seq_length), vocab_size=1_000)
        attention_mask = random_attention_mask((self.batch_size, self.seq_length))
        config = self.get_config()

        return (
            config,
            states,
            actions,
            rewards,
            returns_to_go,
            timesteps,
            attention_mask,
        )
    def get_config(self):
return DecisionTransformerConfig(
batch_size=self.batch_size , seq_length=self.seq_length , act_dim=self.act_dim , state_dim=self.state_dim , hidden_size=self.hidden_size , max_length=self.max_length , )
    def create_and_check_model(
        self,
        config,
        states,
        actions,
        rewards,
        returns_to_go,
        timesteps,
        attention_mask,
    ):
        model = DecisionTransformerModel(config=config)
        model.to(torch_device)
        model.eval()

        result = model(states, actions, rewards, returns_to_go, timesteps, attention_mask)
        self.parent.assertEqual(result.state_preds.shape, states.shape)
        self.parent.assertEqual(result.action_preds.shape, actions.shape)
        self.parent.assertEqual(result.return_preds.shape, returns_to_go.shape)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.seq_length * 3, self.hidden_size)
        )  # seq length * 3 as there are 3 modalities: states, returns and actions
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            states,
            actions,
            rewards,
            returns_to_go,
            timesteps,
            attention_mask,
        ) = config_and_inputs
        inputs_dict = {
            "states": states,
            "actions": actions,
            "rewards": rewards,
            "returns_to_go": returns_to_go,
            "timesteps": timesteps,
            "attention_mask": attention_mask,
        }
        return config, inputs_dict
@require_torch
class DecisionTransformerModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (DecisionTransformerModel,) if is_torch_available() else ()
    all_generative_model_classes = ()
    pipeline_model_mapping = {"feature-extraction": DecisionTransformerModel} if is_torch_available() else {}

    # Ignoring a failing test from GenerationTesterMixin, as the model does not use input_ids
    test_generate_without_input_ids = False

    # Ignoring failing tests from ModelTesterMixin, as the model does not implement these features
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_attention_outputs = False
    test_hidden_states_output = False
    test_inputs_embeds = False
    test_model_common_attributes = False
    test_gradient_checkpointing = False
    test_torchscript = False
    def setUp(self):
        self.model_tester = DecisionTransformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DecisionTransformerConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DecisionTransformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = [
                "states",
                "actions",
                "rewards",
                "returns_to_go",
                "timesteps",
                "attention_mask",
            ]
            self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names)
@require_torch
class DecisionTransformerModelIntegrationTest(unittest.TestCase):
@slow
    def test_autoregressive_prediction(self):
        NUM_STEPS = 2  # number of steps of autoregressive prediction we will perform
        TARGET_RETURN = 10  # defined by the RL environment, may be normalized
        model = DecisionTransformerModel.from_pretrained("edbeeching/decision-transformer-gym-hopper-expert")
        model = model.to(torch_device)
        config = model.config
        torch.manual_seed(0)
        state = torch.randn(1, 1, config.state_dim).to(device=torch_device, dtype=torch.float32)  # env.reset()

        expected_outputs = torch.tensor(
            [[0.242793, -0.28693074, 0.8742613], [0.67815274, -0.08101085, -0.12952147]], device=torch_device
        )

        returns_to_go = torch.tensor(TARGET_RETURN, device=torch_device, dtype=torch.float32).reshape(1, 1, 1)
        states = state
        actions = torch.zeros(1, 0, config.act_dim, device=torch_device, dtype=torch.float32)
        rewards = torch.zeros(1, 0, device=torch_device, dtype=torch.float32)
        timesteps = torch.tensor(0, device=torch_device, dtype=torch.long).reshape(1, 1)

        for step in range(NUM_STEPS):
            actions = torch.cat([actions, torch.zeros(1, 1, config.act_dim, device=torch_device)], dim=1)
            rewards = torch.cat([rewards, torch.zeros(1, 1, device=torch_device)], dim=1)
            attention_mask = torch.ones(1, states.shape[1]).to(dtype=torch.long, device=states.device)

            with torch.no_grad():
                state_pred, action_pred, return_pred = model(
                    states=states,
                    actions=actions,
                    rewards=rewards,
                    returns_to_go=returns_to_go,
                    timesteps=timesteps,
                    attention_mask=attention_mask,
                    return_dict=False,
                )

            self.assertEqual(action_pred.shape, actions.shape)
            self.assertTrue(torch.allclose(action_pred[0, -1], expected_outputs[step], atol=1e-4))

            state, reward, done, _ = (  # env.step(action)
                torch.randn(1, 1, config.state_dim).to(device=torch_device, dtype=torch.float32),
                1.0,
                False,
                {},
            )

            actions[-1] = action_pred[0, -1]
            states = torch.cat([states, state], dim=1)
            pred_return = returns_to_go[0, -1] - reward
            returns_to_go = torch.cat([returns_to_go, pred_return.reshape(1, 1, 1)], dim=1)
            timesteps = torch.cat(
                [timesteps, torch.ones((1, 1), device=torch_device, dtype=torch.long) * (step + 1)], dim=1
            )
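        # The loop above mimics closed-loop control: each step appends the predicted action, a mocked
        # environment transition, and the decremented return-to-go before re-querying the model.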
| 115 |
"""simple docstring"""
import json
import os
from collections import Counter
import torch
import torchvision
import torchvision.transforms as transforms
from PIL import Image
from torch import nn
from torch.utils.data import Dataset
POOLING_BREAKDOWN = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)}
class ImageEncoder(nn.Module):
    def __init__(self, args):
        super().__init__()
        model = torchvision.models.resnet152(pretrained=True)
        modules = list(model.children())[:-2]
        self.model = nn.Sequential(*modules)
        self.pool = nn.AdaptiveAvgPool2d(POOLING_BREAKDOWN[args.num_image_embeds])
    def forward(self, x):
        # Bx3x224x224 -> Bx2048x7x7 -> Bx2048xN -> BxNx2048
        out = self.pool(self.model(x))
        out = torch.flatten(out, start_dim=2)
        out = out.transpose(1, 2).contiguous()
        return out  # BxNx2048
class JsonlDataset(Dataset):
    def __init__(self, data_path, tokenizer, transforms, labels, max_seq_length):
        self.data = [json.loads(line) for line in open(data_path)]
        self.data_dir = os.path.dirname(data_path)
        self.tokenizer = tokenizer
        self.labels = labels
        self.n_classes = len(labels)
        self.max_seq_length = max_seq_length

        self.transforms = transforms
def __len__( self : Tuple ):
'''simple docstring'''
return len(self.data )
    def __getitem__(self, index):
        sentence = torch.LongTensor(self.tokenizer.encode(self.data[index]["text"], add_special_tokens=True))
        start_token, sentence, end_token = sentence[0], sentence[1:-1], sentence[-1]
        sentence = sentence[: self.max_seq_length]

        label = torch.zeros(self.n_classes)
        label[[self.labels.index(tgt) for tgt in self.data[index]["label"]]] = 1

        image = Image.open(os.path.join(self.data_dir, self.data[index]["img"])).convert("RGB")
        image = self.transforms(image)

        return {
            "image_start_token": start_token,
            "image_end_token": end_token,
            "sentence": sentence,
            "image": image,
            "label": label,
        }
    def get_label_frequencies(self):
        label_freqs = Counter()
        for row in self.data:
            label_freqs.update(row["label"])
        return label_freqs
def collate_fn(batch):
    lens = [len(row["sentence"]) for row in batch]
    bsz, max_seq_len = len(batch), max(lens)

    mask_tensor = torch.zeros(bsz, max_seq_len, dtype=torch.long)
    text_tensor = torch.zeros(bsz, max_seq_len, dtype=torch.long)

    for i_batch, (input_row, length) in enumerate(zip(batch, lens)):
        text_tensor[i_batch, :length] = input_row["sentence"]
        mask_tensor[i_batch, :length] = 1

    img_tensor = torch.stack([row["image"] for row in batch])
    tgt_tensor = torch.stack([row["label"] for row in batch])
    img_start_token = torch.stack([row["image_start_token"] for row in batch])
    img_end_token = torch.stack([row["image_end_token"] for row in batch])

    return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor
def get_mmimdb_labels():
return [
"Crime",
"Drama",
"Thriller",
"Action",
"Comedy",
"Romance",
"Documentary",
"Short",
"Mystery",
"History",
"Family",
"Adventure",
"Fantasy",
"Sci-Fi",
"Western",
"Horror",
"Sport",
"War",
"Music",
"Musical",
"Animation",
"Biography",
"Film-Noir",
]
def get_image_transforms():
    return transforms.Compose(
        [
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(
                mean=[0.46777044, 0.44531429, 0.40661017],
                std=[0.12221994, 0.12145835, 0.14380469],
            ),
        ]
    )
| 115 | 1 |
"""simple docstring"""
from __future__ import annotations
def encode(plain: str) -> list[int]:
    # map each lowercase letter to its 1-indexed alphabet position, e.g. "a" -> 1
    return [ord(elem) - 96 for elem in plain]


def decode(encoded: list[int]) -> str:
    return "".join(chr(elem + 96) for elem in encoded)


def main() -> None:
    encoded = encode(input("-> ").strip().lower())
    print("Encoded: ", encoded)
    print("Decoded:", decode(encoded))


if __name__ == "__main__":
    main()
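# Worked example: encode("myname") == [13, 25, 14, 1, 13, 5] and decode(encode("myname")) == "myname",
# i.e. decode is the exact inverse of encode for lowercase a-z input.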
| 370 | """simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

UNISPEECH_SAT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"microsoft/unispeech-sat-base-100h-libri-ft": (
"https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft/resolve/main/config.json"
),
# See all UniSpeechSat models at https://huggingface.co/models?filter=unispeech_sat
}
class UniSpeechSatConfig(PretrainedConfig):
    model_type = "unispeech-sat"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        feat_quantizer_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1500),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=512,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        num_clusters=504,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.num_clusters = num_clusters
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
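# With the default conv_stride (5, 2, 2, 2, 2, 2, 2), inputs_to_logits_ratio is 5 * 2**6 = 320,
# i.e. one logit frame per 320 raw audio samples (20 ms at 16 kHz).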
| 2 | 0 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import MobileBertConfig, is_tf_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_MODEL_FOR_PRETRAINING_MAPPING,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertModel,
)
@require_tf
class TFMobileBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
TFMobileBertModel,
TFMobileBertForMaskedLM,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertForMultipleChoice,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
"feature-extraction": TFMobileBertModel,
"fill-mask": TFMobileBertForMaskedLM,
"question-answering": TFMobileBertForQuestionAnswering,
"text-classification": TFMobileBertForSequenceClassification,
"token-classification": TFMobileBertForTokenClassification,
"zero-shot": TFMobileBertForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_head_masking = False
    test_onnx = False
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(TF_MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["next_sentence_label"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)

        return inputs_dict
    class TFMobileBertModelTester(object):
def __init__( self , __A , __A=13 , __A=7 , __A=True , __A=True , __A=True , __A=True , __A=99 , __A=32 , __A=32 , __A=2 , __A=4 , __A=37 , __A="gelu" , __A=0.1 , __A=0.1 , __A=512 , __A=16 , __A=2 , __A=0.02 , __A=3 , __A=4 , __A=None , ) -> Optional[int]:
a =parent
a =batch_size
a =seq_length
a =is_training
a =use_input_mask
a =use_token_type_ids
a =use_labels
a =vocab_size
a =hidden_size
a =num_hidden_layers
a =num_attention_heads
a =intermediate_size
a =hidden_act
a =hidden_dropout_prob
a =attention_probs_dropout_prob
a =max_position_embeddings
a =type_vocab_size
a =type_sequence_label_size
a =initializer_range
a =num_labels
a =num_choices
a =scope
a =embedding_size
def SCREAMING_SNAKE_CASE ( self ) -> Dict:
a =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
a =None
if self.use_input_mask:
a =random_attention_mask([self.batch_size, self.seq_length] )
a =None
if self.use_token_type_ids:
a =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
a =None
a =None
a =None
if self.use_labels:
a =ids_tensor([self.batch_size] , self.type_sequence_label_size )
a =ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
a =ids_tensor([self.batch_size] , self.num_choices )
a =MobileBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , embedding_size=self.embedding_size , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def SCREAMING_SNAKE_CASE ( self , __A , __A , __A , __A , __A , __A , __A ) -> Any:
a =TFMobileBertModel(config=__A )
a ={'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
a =model(__A )
a =[input_ids, input_mask]
a =model(__A )
a =model(__A )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def SCREAMING_SNAKE_CASE ( self , __A , __A , __A , __A , __A , __A , __A ) -> Optional[Any]:
a =TFMobileBertForMaskedLM(config=__A )
a ={'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
a =model(__A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE ( self , __A , __A , __A , __A , __A , __A , __A ) -> List[Any]:
a =TFMobileBertForNextSentencePrediction(config=__A )
a ={'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
a =model(__A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def SCREAMING_SNAKE_CASE ( self , __A , __A , __A , __A , __A , __A , __A ) -> Optional[Any]:
a =TFMobileBertForPreTraining(config=__A )
a ={'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
a =model(__A )
self.parent.assertEqual(
result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def SCREAMING_SNAKE_CASE ( self , __A , __A , __A , __A , __A , __A , __A ) -> Optional[int]:
a =self.num_labels
a =TFMobileBertForSequenceClassification(config=__A )
a ={'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
a =model(__A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def SCREAMING_SNAKE_CASE ( self , __A , __A , __A , __A , __A , __A , __A ) -> List[str]:
a =self.num_choices
a =TFMobileBertForMultipleChoice(config=__A )
a =tf.tile(tf.expand_dims(__A , 1 ) , (1, self.num_choices, 1) )
a =tf.tile(tf.expand_dims(__A , 1 ) , (1, self.num_choices, 1) )
a =tf.tile(tf.expand_dims(__A , 1 ) , (1, self.num_choices, 1) )
a ={
'''input_ids''': multiple_choice_inputs_ids,
'''attention_mask''': multiple_choice_input_mask,
'''token_type_ids''': multiple_choice_token_type_ids,
}
a =model(__A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def SCREAMING_SNAKE_CASE ( self , __A , __A , __A , __A , __A , __A , __A ) -> List[str]:
a =self.num_labels
a =TFMobileBertForTokenClassification(config=__A )
a ={'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
a =model(__A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def SCREAMING_SNAKE_CASE ( self , __A , __A , __A , __A , __A , __A , __A ) -> List[Any]:
a =TFMobileBertForQuestionAnswering(config=__A )
a ={'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
a =model(__A )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def SCREAMING_SNAKE_CASE ( self ) -> Tuple:
a =self.prepare_config_and_inputs()
(
(
a
) , (
a
) , (
a
) , (
a
) , (
a
) , (
a
) , (
a
) ,
) =config_and_inputs
a ={'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
def SCREAMING_SNAKE_CASE ( self ) -> List[str]:
a =TFMobileBertModelTest.TFMobileBertModelTester(self )
a =ConfigTester(self , config_class=__A , hidden_size=37 )
def SCREAMING_SNAKE_CASE ( self ) -> Tuple:
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE ( self ) -> str:
a =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_model(*__A )
def SCREAMING_SNAKE_CASE ( self ) -> int:
a =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_masked_lm(*__A )
def SCREAMING_SNAKE_CASE ( self ) -> int:
a =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_multiple_choice(*__A )
def SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
a =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*__A )
def SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
a =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_pretraining(*__A )
def SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
a =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_question_answering(*__A )
def SCREAMING_SNAKE_CASE ( self ) -> Dict:
a =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_sequence_classification(*__A )
def SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
a =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_token_classification(*__A )
@slow
def SCREAMING_SNAKE_CASE ( self ) -> Dict:
# for model_name in TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["google/mobilebert-uncased"]:
a =TFMobileBertModel.from_pretrained(__A )
self.assertIsNotNone(__A )
@require_tf
class TFMobileBertModelIntegrationTest(unittest.TestCase):
@slow
def SCREAMING_SNAKE_CASE ( self ) -> str:
a =TFMobileBertForPreTraining.from_pretrained('''google/mobilebert-uncased''' )
a =tf.constant([[0, 1, 2, 3, 4, 5]] )
a =model(__A )[0]
a =[1, 6, 3_0522]
self.assertEqual(output.shape , __A )
a =tf.constant(
[
[
[-4.5_919_547, -9.248_295, -9.645_256],
[-6.7_306_175, -6.440_284, -6.6_052_837],
[-7.2_743_506, -6.7_847_915, -6.024_673],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , __A , atol=1E-4 ) | 81 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class DocumentQuestionAnsweringTool(PipelineTool):
    default_checkpoint = "naver-clova-ix/donut-base-finetuned-docvqa"
    description = (
        "This is a tool that answers a question about a document (pdf). It takes an input named `document` which "
        "should be the document containing the information, as well as a `question` that is the question about the "
        "document. It returns a text that contains the answer to the question."
    )
    name = "document_qa"
    pre_processor_class = AutoProcessor
    model_class = VisionEncoderDecoderModel

    inputs = ["image", "text"]
    outputs = ["text"]
    def __init__(self, *args, **kwargs):
        if not is_vision_available():
            raise ValueError("Pillow must be installed to use the DocumentQuestionAnsweringTool.")
        super().__init__(*args, **kwargs)

    def encode(self, document: "Image", question: str):
        task_prompt = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
        prompt = task_prompt.replace("{user_input}", question)
        decoder_input_ids = self.pre_processor.tokenizer(
            prompt, add_special_tokens=False, return_tensors="pt"
        ).input_ids
        pixel_values = self.pre_processor(document, return_tensors="pt").pixel_values
        return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}

    def forward(self, inputs):
        return self.model.generate(
            inputs["pixel_values"].to(self.device),
            decoder_input_ids=inputs["decoder_input_ids"].to(self.device),
            max_length=self.model.decoder.config.max_position_embeddings,
            early_stopping=True,
            pad_token_id=self.pre_processor.tokenizer.pad_token_id,
            eos_token_id=self.pre_processor.tokenizer.eos_token_id,
            use_cache=True,
            num_beams=1,
            bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]],
            return_dict_in_generate=True,
        ).sequences

    def decode(self, outputs):
        sequence = self.pre_processor.batch_decode(outputs)[0]
        sequence = sequence.replace(self.pre_processor.tokenizer.eos_token, "")
        sequence = sequence.replace(self.pre_processor.tokenizer.pad_token, "")
        sequence = re.sub(r"<.*?>", "", sequence, count=1).strip()  # remove first task start token
        sequence = self.pre_processor.token2json(sequence)
        return sequence["answer"]
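# Minimal usage sketch (not part of the original file; the image path and
# question are illustrative). `PipelineTool.__call__` chains encode -> forward
# -> decode, instantiating the processor and model on first use:
#
#     from PIL import Image
#
#     tool = DocumentQuestionAnsweringTool()
#     page = Image.open("invoice.png")  # hypothetical local file
#     print(tool(document=page, question="What is the invoice number?"))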
| 252 | 0 |
"""simple docstring"""
import argparse
from collections import defaultdict
import yaml
PATH_TO_TOC = "docs/source/en/_toctree.yml"
def clean_model_doc_toc(model_doc):
    """Remove duplicate entries from the model documentation toc and sort it by title."""
    counts = defaultdict(int)
    for doc in model_doc:
        counts[doc["local"]] += 1
    duplicates = [key for key, value in counts.items() if value > 1]

    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc["title"] for doc in model_doc if doc["local"] == duplicate_key})
        if len(titles) > 1:
            raise ValueError(
                f"{duplicate_key} is present several times in the documentation table of content at "
                "`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
                "others."
            )
        # Only add this once
        new_doc.append({"local": duplicate_key, "title": titles[0]})

    # Add none duplicate-keys
    new_doc.extend([doc for doc in model_doc if counts[doc["local"]] == 1])

    # Sort
    return sorted(new_doc, key=lambda s: s["title"].lower())
def check_model_doc(overwrite=False):
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the model doc
    model_idx = 0
    while api_doc[model_idx]["title"] != "Models":
        model_idx += 1
    model_doc = api_doc[model_idx]["sections"]

    modalities_docs = [(idx, section) for idx, section in enumerate(model_doc) if "sections" in section]
    diff = False
    for idx, modality_doc in modalities_docs:
        old_modality_doc = modality_doc["sections"]
        new_modality_doc = clean_model_doc_toc(old_modality_doc)
        if old_modality_doc != new_modality_doc:
            diff = True
            if overwrite:
                model_doc[idx]["sections"] = new_modality_doc

    if diff:
        if overwrite:
            api_doc[model_idx]["sections"] = model_doc
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this."
            )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_model_doc(args.fix_and_overwrite)
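# Illustration (not part of the original script): given duplicated "local"
# keys, clean_model_doc_toc keeps one entry per key and sorts by title:
#
#     clean_model_doc_toc([
#         {"local": "model_doc/bert", "title": "BERT"},
#         {"local": "model_doc/bert", "title": "BERT"},
#         {"local": "model_doc/albert", "title": "ALBERT"},
#     ])
#     # -> [{"local": "model_doc/albert", "title": "ALBERT"},
#     #     {"local": "model_doc/bert", "title": "BERT"}]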
| 361 |
"""simple docstring"""
from collections import defaultdict
def check_anagrams(first_str: str, second_str: str) -> bool:
    """
    Two strings are anagrams if they are made of the same letters arranged
    differently (ignoring case and spaces).

    >>> check_anagrams('Silent', 'Listen')
    True
    >>> check_anagrams('There', 'Their')
    False
    """
    first_str = first_str.lower().strip()
    second_str = second_str.lower().strip()

    # Remove whitespace
    first_str = first_str.replace(" ", "")
    second_str = second_str.replace(" ", "")

    # Strings of different lengths are not anagrams
    if len(first_str) != len(second_str):
        return False

    # Default values for count should be 0
    count: defaultdict[str, int] = defaultdict(int)

    # For each character in input strings,
    # increment count in the corresponding
    for i in range(len(first_str)):
        count[first_str[i]] += 1
        count[second_str[i]] -= 1

    return all(_count == 0 for _count in count.values())


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    input_a = input("Enter the first string ").strip()
    input_b = input("Enter the second string ").strip()

    status = check_anagrams(input_a, input_b)
    print(f"{input_a} and {input_b} are {'' if status else 'not '}anagrams.")
| 318 | 0 |
'''simple docstring'''
import os
try:
from .build_directory_md import good_file_paths
except ImportError:
from build_directory_md import good_file_paths # type: ignore
filepaths = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"

upper_files = [file for file in filepaths if file != file.lower()]
if upper_files:
    print(f"{len(upper_files)} files contain uppercase characters:")
    print("\n".join(upper_files) + "\n")

space_files = [file for file in filepaths if " " in file]
if space_files:
    print(f"{len(space_files)} files contain space characters:")
    print("\n".join(space_files) + "\n")

hyphen_files = [file for file in filepaths if "-" in file]
if hyphen_files:
    print(f"{len(hyphen_files)} files contain hyphen characters:")
    print("\n".join(hyphen_files) + "\n")

nodir_files = [file for file in filepaths if os.sep not in file]
if nodir_files:
    print(f"{len(nodir_files)} files are not in a directory:")
    print("\n".join(nodir_files) + "\n")

bad_files = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
import sys
sys.exit(bad_files)
| 63 |
'''simple docstring'''
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class OwlViTProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "OwlViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
def __call__( self : Tuple , _a : Tuple=None , _a : int=None , _a : List[Any]=None , _a : List[Any]="max_length" , _a : Union[str, Any]="np" , **_a : Union[str, Any] ) -> List[str]:
if text is None and query_images is None and images is None:
raise ValueError(
'You have to specify at least one text or query image or image. All three cannot be none.' )
if text is not None:
if isinstance(_a , _a ) or (isinstance(_a , _a ) and not isinstance(text[0] , _a )):
__lowerCamelCase : Any = [self.tokenizer(_a , padding=_a , return_tensors=_a , **_a )]
elif isinstance(_a , _a ) and isinstance(text[0] , _a ):
__lowerCamelCase : List[Any] = []
# Maximum number of queries across batch
__lowerCamelCase : str = max([len(_a ) for t in text] )
# Pad all batch samples to max number of text queries
for t in text:
if len(_a ) != max_num_queries:
__lowerCamelCase : List[Any] = t + [' '] * (max_num_queries - len(_a ))
__lowerCamelCase : Dict = self.tokenizer(_a , padding=_a , return_tensors=_a , **_a )
encodings.append(_a )
else:
raise TypeError('Input text should be a string, a list of strings or a nested list of strings' )
if return_tensors == "np":
__lowerCamelCase : Optional[Any] = np.concatenate([encoding['input_ids'] for encoding in encodings] , axis=0 )
__lowerCamelCase : str = np.concatenate([encoding['attention_mask'] for encoding in encodings] , axis=0 )
elif return_tensors == "jax" and is_flax_available():
import jax.numpy as jnp
__lowerCamelCase : int = jnp.concatenate([encoding['input_ids'] for encoding in encodings] , axis=0 )
__lowerCamelCase : List[str] = jnp.concatenate([encoding['attention_mask'] for encoding in encodings] , axis=0 )
elif return_tensors == "pt" and is_torch_available():
import torch
__lowerCamelCase : str = torch.cat([encoding['input_ids'] for encoding in encodings] , dim=0 )
__lowerCamelCase : Optional[Any] = torch.cat([encoding['attention_mask'] for encoding in encodings] , dim=0 )
elif return_tensors == "tf" and is_tf_available():
import tensorflow as tf
__lowerCamelCase : List[Any] = tf.stack([encoding['input_ids'] for encoding in encodings] , axis=0 )
__lowerCamelCase : int = tf.stack([encoding['attention_mask'] for encoding in encodings] , axis=0 )
else:
raise ValueError('Target return tensor type could not be returned' )
__lowerCamelCase : Any = BatchEncoding()
__lowerCamelCase : Dict = input_ids
__lowerCamelCase : str = attention_mask
if query_images is not None:
__lowerCamelCase : Optional[int] = BatchEncoding()
__lowerCamelCase : List[Any] = self.image_processor(
_a , return_tensors=_a , **_a ).pixel_values
__lowerCamelCase : str = query_pixel_values
if images is not None:
__lowerCamelCase : Union[str, Any] = self.image_processor(_a , return_tensors=_a , **_a )
if text is not None and images is not None:
__lowerCamelCase : Tuple = image_features.pixel_values
return encoding
elif query_images is not None and images is not None:
__lowerCamelCase : Tuple = image_features.pixel_values
return encoding
elif text is not None or query_images is not None:
return encoding
else:
return BatchEncoding(data=dict(**_a ) , tensor_type=_a )
    def post_process(self, *args, **kwargs):
        return self.image_processor.post_process(*args, **kwargs)

    def post_process_object_detection(self, *args, **kwargs):
        return self.image_processor.post_process_object_detection(*args, **kwargs)

    def post_process_image_guided_detection(self, *args, **kwargs):
        return self.image_processor.post_process_image_guided_detection(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
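# Minimal usage sketch (not part of the original file); the checkpoint is the
# public OWL-ViT base model and `image` is any PIL image:
#
#     from transformers import OwlViTProcessor
#
#     processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
#     batch = processor(text=[["a photo of a cat", "a photo of a dog"]], images=image, return_tensors="pt")
#     # batch contains input_ids, attention_mask and pixel_values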
| 208 | 0 |
"""simple docstring"""
from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class BlipTextModelTester:
    def __init__(
        self,
        parent,
        batch_size=12,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        projection_dim=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        dropout=0.1,
        attention_dropout=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        bos_token_id=0,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = scope
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        if input_mask is not None:
            input_mask = input_mask.numpy()
            batch_size, seq_length = input_mask.shape
            rnd_start_indices = np.random.randint(1, seq_length - 1, size=(batch_size,))
            for batch_idx, start_index in enumerate(rnd_start_indices):
                input_mask[batch_idx, :start_index] = 1
                input_mask[batch_idx, start_index:] = 0

        config = self.get_config()

        return config, input_ids, tf.convert_to_tensor(input_mask)

    def get_config(self):
        return BlipTextConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            projection_dim=self.projection_dim,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            dropout=self.dropout,
            attention_dropout=self.attention_dropout,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            bos_token_id=self.bos_token_id,
        )
    def create_and_check_model(self, config, input_ids, input_mask):
        model = TFBlipTextModel(config=config)
        result = model(input_ids, attention_mask=input_mask, training=False)
        result = model(input_ids, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFBlipTextModelTest(TFModelTesterMixin, unittest.TestCase):
    all_model_classes = (TFBlipTextModel,) if is_tf_available() else ()
    fx_compatible = False
    test_pruning = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = BlipTextModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlipTextConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_training(self):
        pass

    def test_training_gradient_checkpointing(self):
        pass

    @unittest.skip(reason="Blip does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING")
    def test_save_load_fast_init_to_base(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFBlipTextModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_pt_tf_model_equivalence(self, allow_missing_keys=True):
        super().test_pt_tf_model_equivalence(allow_missing_keys=allow_missing_keys)
| 239 |
"""simple docstring"""
import argparse
import torch
from transformers import (
UniSpeechSatConfig,
UniSpeechSatForAudioFrameClassification,
UniSpeechSatForSequenceClassification,
UniSpeechSatForXVector,
    Wav2Vec2FeatureExtractor,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def convert_classification(base_model_name, hf_config, downstream_dict):
    model = UniSpeechSatForSequenceClassification.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["projector.weight"]
    model.projector.bias.data = downstream_dict["projector.bias"]
    model.classifier.weight.data = downstream_dict["model.post_net.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.post_net.linear.bias"]
    return model


def convert_diarization(base_model_name, hf_config, downstream_dict):
    model = UniSpeechSatForAudioFrameClassification.from_pretrained(base_model_name, config=hf_config)
    model.classifier.weight.data = downstream_dict["model.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.linear.bias"]
    return model


def convert_xvector(base_model_name, hf_config, downstream_dict):
    model = UniSpeechSatForXVector.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["connector.weight"]
    model.projector.bias.data = downstream_dict["connector.bias"]
    for i, kernel_size in enumerate(hf_config.tdnn_kernel):
        model.tdnn[i].kernel.weight.data = downstream_dict[
            f"model.framelevel_feature_extractor.module.{i}.kernel.weight"
        ]
        model.tdnn[i].kernel.bias.data = downstream_dict[f"model.framelevel_feature_extractor.module.{i}.kernel.bias"]

    model.feature_extractor.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"]
    model.feature_extractor.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"]
    model.classifier.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"]
    model.classifier.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"]
    model.objective.weight.data = downstream_dict["objective.W"]
    return model


@torch.no_grad()
def convert_s3prl_checkpoint(base_model_name, config_path, checkpoint_path, model_dump_path):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    downstream_dict = checkpoint["Downstream"]

    hf_config = UniSpeechSatConfig.from_pretrained(config_path)
    hf_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
        base_model_name, return_attention_mask=True, do_normalize=False
    )

    arch = hf_config.architectures[0]
    if arch.endswith("ForSequenceClassification"):
        hf_model = convert_classification(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForAudioFrameClassification"):
        hf_model = convert_diarization(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForXVector"):
        hf_model = convert_xvector(base_model_name, hf_config, downstream_dict)
    else:
        raise NotImplementedError(f"S3PRL weights conversion is not supported for {arch}")

    if hf_config.use_weighted_layer_sum:
        hf_model.layer_weights.data = checkpoint["Featurizer"]["weights"]

    hf_feature_extractor.save_pretrained(model_dump_path)
    hf_model.save_pretrained(model_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--base_model_name", default=None, type=str, help="Name of the huggingface pretrained base model."
    )
    parser.add_argument("--config_path", default=None, type=str, help="Path to the huggingface classifier config.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to the s3prl checkpoint.")
    parser.add_argument("--model_dump_path", default=None, type=str, help="Path to the final converted model.")
    args = parser.parse_args()

    convert_s3prl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
| 239 | 1 |
"""simple docstring"""
def depth_first_search(grid: list[list[int]], row: int, col: int, visit: set) -> int:
    """Count the paths from the top-left to the bottom-right corner of `grid`,
    moving in the four cardinal directions, avoiding walls (cells equal to 1)
    and never revisiting a cell."""
    row_length, col_length = len(grid), len(grid[0])
    if (
        min(row, col) < 0
        or row == row_length
        or col == col_length
        or (row, col) in visit
        or grid[row][col] == 1
    ):
        return 0
    if row == row_length - 1 and col == col_length - 1:
        return 1

    visit.add((row, col))

    count = 0
    count += depth_first_search(grid, row + 1, col, visit)
    count += depth_first_search(grid, row - 1, col, visit)
    count += depth_first_search(grid, row, col + 1, visit)
    count += depth_first_search(grid, row, col - 1, visit)

    visit.remove((row, col))
    return count


if __name__ == "__main__":
    import doctest

    doctest.testmod()
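
# Example (not part of the original module): a 3x3 grid whose centre cell is a
# wall has exactly two paths from the top-left to the bottom-right corner:
#
#     grid = [[0, 0, 0], [0, 1, 0], [0, 0, 0]]
#     depth_first_search(grid, 0, 0, set())  # -> 2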
| 45 | """simple docstring"""
deps = {
'Pillow': 'Pillow',
'accelerate': 'accelerate>=0.11.0',
'compel': 'compel==0.1.8',
'black': 'black~=23.1',
'datasets': 'datasets',
'filelock': 'filelock',
'flax': 'flax>=0.4.1',
'hf-doc-builder': 'hf-doc-builder>=0.3.0',
'huggingface-hub': 'huggingface-hub>=0.13.2',
'requests-mock': 'requests-mock==1.10.0',
'importlib_metadata': 'importlib_metadata',
'invisible-watermark': 'invisible-watermark',
'isort': 'isort>=5.5.4',
'jax': 'jax>=0.2.8,!=0.3.2',
'jaxlib': 'jaxlib>=0.1.65',
'Jinja2': 'Jinja2',
'k-diffusion': 'k-diffusion>=0.0.12',
'torchsde': 'torchsde',
'note_seq': 'note_seq',
'librosa': 'librosa',
'numpy': 'numpy',
'omegaconf': 'omegaconf',
'parameterized': 'parameterized',
'protobuf': 'protobuf>=3.20.3,<4',
'pytest': 'pytest',
'pytest-timeout': 'pytest-timeout',
'pytest-xdist': 'pytest-xdist',
'ruff': 'ruff>=0.0.241',
'safetensors': 'safetensors',
'sentencepiece': 'sentencepiece>=0.1.91,!=0.1.92',
'scipy': 'scipy',
'onnx': 'onnx',
'regex': 'regex!=2019.12.17',
'requests': 'requests',
'tensorboard': 'tensorboard',
'torch': 'torch>=1.4',
'torchvision': 'torchvision',
'transformers': 'transformers>=4.25.1',
'urllib3': 'urllib3<=2.0.0',
}
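# Sketch (not part of the original file) of how such a pin table is usually
# consumed, mirroring the `deps_list` helper found in HF-style setup.py files:
#
#     def deps_list(*pkgs):
#         return [deps[pkg] for pkg in pkgs]
#
#     # deps_list("torch", "numpy") -> ["torch>=1.4", "numpy"]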
| 44 | 0 |
"""simple docstring"""
import os
from collections import namedtuple
import pytest
from datasets import ClassLabel, Features, Sequence, Value
from datasets.commands.test import TestCommand
from datasets.info import DatasetInfo, DatasetInfosDict
_TestCommandArgs = namedtuple(
'_TestCommandArgs',
[
'dataset',
'name',
'cache_dir',
'data_dir',
'all_configs',
'save_infos',
'ignore_verifications',
'force_redownload',
'clear_cache',
],
defaults=[None, None, None, False, False, False, False, False],
)
def is_1percent_close(source, target):
    return (abs(source - target) / target) < 0.01


@pytest.mark.integration
def test_test_command(dataset_loading_script_dir):
    args = _TestCommandArgs(dataset=dataset_loading_script_dir, all_configs=True, save_infos=True)
    test_command = TestCommand(*args)
    test_command.run()
    datasets_readme_path = os.path.join(dataset_loading_script_dir, "README.md")
    assert os.path.exists(datasets_readme_path)
    dataset_infos = DatasetInfosDict.from_directory(dataset_loading_script_dir)
    expected_dataset_infos = DatasetInfosDict(
{
'default': DatasetInfo(
features=Features(
{
'tokens': Sequence(Value('string' ) ),
'ner_tags': Sequence(
ClassLabel(names=['O', 'B-PER', 'I-PER', 'B-ORG', 'I-ORG', 'B-LOC', 'I-LOC'] ) ),
'langs': Sequence(Value('string' ) ),
'spans': Sequence(Value('string' ) ),
} ), splits=[
{
'name': 'train',
'num_bytes': 235_1563,
'num_examples': 1_0000,
},
{
'name': 'validation',
'num_bytes': 23_8418,
'num_examples': 1000,
},
], download_size=394_0680, dataset_size=258_9981, )
} )
    assert dataset_infos.keys() == expected_dataset_infos.keys()
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        result, expected = getattr(dataset_infos["default"], key), getattr(expected_dataset_infos["default"], key)
        if key == "num_bytes":
            assert is_1percent_close(result, expected)
        elif key == "splits":
            assert list(result) == list(expected)
            for split in result:
                assert result[split].name == expected[split].name
                assert result[split].num_examples == expected[split].num_examples
                assert is_1percent_close(result[split].num_bytes, expected[split].num_bytes)
        else:
            assert result == expected
| 362 |
"""simple docstring"""
import argparse
import numpy as np
import torch
from transformers import SpeechT5HifiGan, SpeechT5HifiGanConfig, logging
logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.speecht5")
def load_weights(checkpoint, hf_model, config):
    hf_model.apply_weight_norm()

    hf_model.conv_pre.weight_g.data = checkpoint["input_conv.weight_g"]
    hf_model.conv_pre.weight_v.data = checkpoint["input_conv.weight_v"]
    hf_model.conv_pre.bias.data = checkpoint["input_conv.bias"]

    for i in range(len(config.upsample_rates)):
        hf_model.upsampler[i].weight_g.data = checkpoint[f"upsamples.{i}.1.weight_g"]
        hf_model.upsampler[i].weight_v.data = checkpoint[f"upsamples.{i}.1.weight_v"]
        hf_model.upsampler[i].bias.data = checkpoint[f"upsamples.{i}.1.bias"]

    for i in range(len(config.upsample_rates) * len(config.resblock_kernel_sizes)):
        for j in range(len(config.resblock_dilation_sizes)):
            hf_model.resblocks[i].convs1[j].weight_g.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_g"]
            hf_model.resblocks[i].convs1[j].weight_v.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_v"]
            hf_model.resblocks[i].convs1[j].bias.data = checkpoint[f"blocks.{i}.convs1.{j}.1.bias"]

            hf_model.resblocks[i].convs2[j].weight_g.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_g"]
            hf_model.resblocks[i].convs2[j].weight_v.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_v"]
            hf_model.resblocks[i].convs2[j].bias.data = checkpoint[f"blocks.{i}.convs2.{j}.1.bias"]

    hf_model.conv_post.weight_g.data = checkpoint["output_conv.1.weight_g"]
    hf_model.conv_post.weight_v.data = checkpoint["output_conv.1.weight_v"]
    hf_model.conv_post.bias.data = checkpoint["output_conv.1.bias"]

    hf_model.remove_weight_norm()


@torch.no_grad()
def convert_hifigan_checkpoint(checkpoint_path, stats_path, pytorch_dump_folder_path, config_path=None, repo_id=None):
    if config_path is not None:
        config = SpeechT5HifiGanConfig.from_pretrained(config_path)
    else:
        config = SpeechT5HifiGanConfig()

    model = SpeechT5HifiGan(config)

    orig_checkpoint = torch.load(checkpoint_path)
    load_weights(orig_checkpoint["model"]["generator"], model, config)

    stats = np.load(stats_path)
    mean = stats[0].reshape(-1)
    scale = stats[1].reshape(-1)
    model.mean = torch.from_numpy(mean).float()
    model.scale = torch.from_numpy(scale).float()

    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print("Pushing to the hub...")
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to original checkpoint")
    parser.add_argument("--stats_path", required=True, default=None, type=str, help="Path to stats.npy file")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
    )
    args = parser.parse_args()
    convert_hifigan_checkpoint(
        args.checkpoint_path,
        args.stats_path,
        args.pytorch_dump_folder_path,
        args.config_path,
        args.push_to_hub,
    )
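# Example invocation (hypothetical paths, not part of the original script):
#
#     python convert_hifigan.py \
#         --checkpoint_path generator.ckpt \
#         --stats_path stats.npy \
#         --pytorch_dump_folder_path ./speecht5_hifigan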
| 205 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_swinv2": ["SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Swinv2Config"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_swinv2"] = [
        "SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Swinv2ForImageClassification",
        "Swinv2ForMaskedImageModeling",
        "Swinv2Model",
        "Swinv2PreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_swinv2 import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, Swinv2Config

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_swinv2 import (
            SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Swinv2ForImageClassification,
            Swinv2ForMaskedImageModeling,
            Swinv2Model,
            Swinv2PreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 179 |
"""simple docstring"""
import unittest
import numpy as np
import requests
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
    is_torch_greater_or_equal_than_1_11 = False
if is_vision_available():
from PIL import Image
    from transformers import Pix2StructImageProcessor
class Pix2StructImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        size=None,
        do_normalize=True,
        do_convert_rgb=True,
        patch_size=None,
    ):
        size = size if size is not None else {"height": 20, "width": 20}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.size = size
        self.do_normalize = do_normalize
        self.do_convert_rgb = do_convert_rgb
        self.max_patches = [512, 1024, 2048, 4096]
        self.patch_size = patch_size if patch_size is not None else {"height": 16, "width": 16}

    def prepare_image_processor_dict(self):
        return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb}

    def prepare_dummy_image(self):
        img_url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg"
        raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")
        return raw_image
@unittest.skipIf(
    not is_torch_greater_or_equal_than_1_11,
    reason="`Pix2StructImageProcessor` requires `torch>=1.11.0`.",
)
@require_torch
@require_vision
class Pix2StructImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = Pix2StructImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = Pix2StructImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_convert_rgb"))

    def test_expected_patches(self):
        dummy_image = self.image_processor_tester.prepare_dummy_image()
        image_processor = self.image_processing_class(**self.image_processor_dict)
        max_patch = 2048
        inputs = image_processor(dummy_image, return_tensors="pt", max_patches=max_patch)
        self.assertTrue(torch.allclose(inputs.flattened_patches.mean(), torch.tensor(0.0606), atol=1e-3, rtol=1e-3))
    def test_call_pil(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(encoded_images.shape, (1, max_patch, expected_hidden_dim))

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim)
            )

    def test_call_vqa(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        image_processor.is_vqa = True
        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            with self.assertRaises(ValueError):
                encoded_images = image_processor(
                    image_inputs[0], return_tensors="pt", max_patches=max_patch
                ).flattened_patches

            dummy_text = "Hello"

            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch, header_text=dummy_text
            ).flattened_patches
            self.assertEqual(encoded_images.shape, (1, max_patch, expected_hidden_dim))

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch, header_text=dummy_text
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim)
            )

    def test_call_numpy(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(encoded_images.shape, (1, max_patch, expected_hidden_dim))

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim)
            )

    def test_call_pytorch(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(encoded_images.shape, (1, max_patch, expected_hidden_dim))

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim)
            )
@unittest.skipIf(
    not is_torch_greater_or_equal_than_1_11,
    reason="`Pix2StructImageProcessor` requires `torch>=1.11.0`.",
)
@require_torch
@require_vision
class Pix2StructImageProcessingTestFourChannels(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = Pix2StructImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = Pix2StructImageProcessingTester(self, num_channels=4)
        self.expected_encoded_image_num_channels = 3

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_convert_rgb"))

    def test_call_pil_four_channels(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * (self.image_processor_tester.num_channels - 1)
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(encoded_images.shape, (1, max_patch, expected_hidden_dim))

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim)
            )
| 179 | 1 |
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
    OnnxStableDiffusionImg2ImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionImg2ImgPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    hub_checkpoint = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"

    def get_dummy_inputs(self, seed=0):
        image = floats_tensor((1, 3, 128, 128), rng=random.Random(seed))
        generator = np.random.RandomState(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "strength": 0.75,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_pipeline_default_ddim(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.69643, 0.58484, 0.50314, 0.58760, 0.55368, 0.59643, 0.51529, 0.41217, 0.49087])
        assert np.abs(image_slice - expected_slice).max() < 1e-1
    def test_pipeline_pndm(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.61737, 0.54642, 0.53183, 0.54465, 0.52742, 0.60525, 0.49969, 0.40655, 0.48154])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_lms(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        # warmup pass to apply optimizations
        _ = pipe(**self.get_dummy_inputs())

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52761, 0.59977, 0.49033, 0.49619, 0.54282, 0.50311, 0.47600, 0.40918, 0.45203])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler_ancestral(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_dpm_multistep(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.65331, 0.58277, 0.48204, 0.56059, 0.53665, 0.56235, 0.50969, 0.40009, 0.46552])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionImg2ImgPipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_inference_default_pndm(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))
        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=10,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.4909, 0.5059, 0.5372, 0.4623, 0.4876, 0.5049, 0.4820, 0.4956, 0.5019])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    def test_inference_k_lms(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "runwayml/stable-diffusion-v1-5", subfolder="scheduler", revision="onnx"
        )
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5",
            revision="onnx",
            scheduler=lms_scheduler,
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=20,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.8043, 0.926, 0.9581, 0.8119, 0.8954, 0.913, 0.7209, 0.7463, 0.7431])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
| 362 |
from __future__ import annotations
from collections.abc import Iterable, Iterator
from dataclasses import dataclass
test_data_odd = (3, 9, -11, 0, 7, 5, 1, -1)
test_data_even = (4, 6, 2, 0, 8, 10, 3, -2)


@dataclass
class Node:
    data: int
    next_node: Node | None


class SortedLinkedList:
    def __init__(self, ints: Iterable[int]) -> None:
        self.head: Node | None = None
        for i in sorted(ints, reverse=True):
            self.head = Node(i, self.head)

    def __iter__(self) -> Iterator[int]:
        node = self.head
        while node:
            yield node.data
            node = node.next_node

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __str__(self) -> str:
        return " -> ".join([str(node) for node in self])


def merge_lists(sll_one: SortedLinkedList, sll_two: SortedLinkedList) -> SortedLinkedList:
    """Merge two sorted linked lists into a single sorted linked list."""
    return SortedLinkedList(list(sll_one) + list(sll_two))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    SSL = SortedLinkedList
    print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
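    # Expected output (not printed in the original source, shown for reference):
    # -11 -> -2 -> -1 -> 0 -> 0 -> 1 -> 2 -> 3 -> 3 -> 4 -> 5 -> 6 -> 7 -> 8 -> 9 -> 10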
| 7 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

OPEN_LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "s-JoL/Open-Llama-V1": "https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json",
}
class OpenLlamaConfig(PretrainedConfig):
    model_type = "open-llama"

    def __init__(
        self,
        vocab_size=100000,
        hidden_size=4096,
        intermediate_size=11008,
        num_hidden_layers=32,
        num_attention_heads=32,
        hidden_act="silu",
        max_position_embeddings=2048,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        tie_word_embeddings=False,
        use_memorry_efficient_attention=True,
        hidden_dropout_prob=0.1,
        attention_dropout_prob=0.1,
        use_stable_embedding=True,
        shared_input_output_embedding=True,
        rope_scaling=None,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        # the "memorry" spelling is kept for compatibility with upstream checkpoints
        self.use_memorry_efficient_attention = kwargs.pop(
            "use_memorry_efficient_attention", use_memorry_efficient_attention
        )
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_dropout_prob = attention_dropout_prob
        self.use_stable_embedding = use_stable_embedding
        self.shared_input_output_embedding = shared_input_output_embedding
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )
    def _rope_scaling_validation(self):
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
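# Usage sketch (not part of the original file): `rope_scaling` must be a dict
# with a "type" in {"linear", "dynamic"} and a float "factor" > 1, e.g.
#
#     config = OpenLlamaConfig(rope_scaling={"type": "dynamic", "factor": 2.0})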
| 344 |
'''simple docstring'''
def solution(n: int = 100) -> int:
    """Return the difference between the square of the sum of the first `n`
    natural numbers and the sum of their squares."""
    sum_of_squares = n * (n + 1) * (2 * n + 1) / 6
    square_of_sum = (n * (n + 1) / 2) ** 2
    return int(square_of_sum - sum_of_squares)


if __name__ == "__main__":
    print(f"{solution() = }")
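# Worked check: solution(10) = 55**2 - 385 = 3025 - 385 = 2640, the value given
# in Project Euler problem 6; the default solution(100) returns 25164150.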
| 344 | 1 |
import logging
from transformers import PretrainedConfig
logger = logging.getLogger(__name__)

BERTABS_FINETUNED_CONFIG_ARCHIVE_MAP = {
    "bertabs-finetuned-cnndm": "https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json",
}
class BertAbsConfig(PretrainedConfig):
    model_type = "bertabs"

    def __init__(
        self,
        vocab_size=30522,
        max_pos=512,
        enc_layers=6,
        enc_hidden_size=512,
        enc_heads=8,
        enc_ff_size=512,
        enc_dropout=0.2,
        dec_layers=6,
        dec_hidden_size=768,
        dec_heads=8,
        dec_ff_size=2048,
        dec_dropout=0.2,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.max_pos = max_pos
        self.enc_layers = enc_layers
        self.enc_hidden_size = enc_hidden_size
        self.enc_heads = enc_heads
        self.enc_ff_size = enc_ff_size
        self.enc_dropout = enc_dropout
        self.dec_layers = dec_layers
        self.dec_hidden_size = dec_hidden_size
        self.dec_heads = dec_heads
        self.dec_ff_size = dec_ff_size
        self.dec_dropout = dec_dropout
| 71 |
# using dfs for finding eulerian path traversal
def dfs(u, graph, visited_edge, path=None):
    path = (path or []) + [u]
    for v in graph[u]:
        if visited_edge[u][v] is False:
            visited_edge[u][v], visited_edge[v][u] = True, True
            path = dfs(v, graph, visited_edge, path)
    return path


# for checking whether the graph has an Eulerian path or circuit
def check_circuit_or_path(graph, max_node):
    odd_degree_nodes = 0
    odd_node = -1
    for i in range(max_node):
        if i not in graph.keys():
            continue
        if len(graph[i]) % 2 == 1:
            odd_degree_nodes += 1
            odd_node = i
    if odd_degree_nodes == 0:
        return 1, odd_node
    if odd_degree_nodes == 2:
        return 2, odd_node
    return 3, odd_node


def check_euler(graph, max_node):
    visited_edge = [[False for _ in range(max_node + 1)] for _ in range(max_node + 1)]
    check, odd_node = check_circuit_or_path(graph, max_node)
    if check == 3:
        print("graph is not Eulerian")
        print("no path")
        return
    start_node = 1
    if check == 2:
        start_node = odd_node
        print("graph has a Euler path")
    if check == 1:
        print("graph has a Euler cycle")
    path = dfs(start_node, graph, visited_edge)
    print(path)
def main():
    g1 = {1: [2, 3, 4], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [4]}
    g2 = {1: [2, 3, 4, 5], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [1, 4]}
    g3 = {1: [2, 3, 4], 2: [1, 3, 4], 3: [1, 2], 4: [1, 2, 5], 5: [4]}
    g4 = {1: [2, 3], 2: [1, 3], 3: [1, 2]}
    g5 = {
        1: [],
        2: []
        # all degree is zero
    }
    max_node = 10
    check_euler(g1, max_node)
    check_euler(g2, max_node)
    check_euler(g3, max_node)
    check_euler(g4, max_node)
    check_euler(g5, max_node)


if __name__ == "__main__":
    main()
| 71 | 1 |
import tempfile
import unittest
from transformers import T5Config, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel
class lowerCAmelCase :
def __init__( self : Tuple , UpperCAmelCase : Dict , UpperCAmelCase : Union[str, Any]=99 , UpperCAmelCase : str=13 , UpperCAmelCase : List[str]=7 , UpperCAmelCase : str=9 , UpperCAmelCase : Optional[int]=True , UpperCAmelCase : str=True , UpperCAmelCase : Any=False , UpperCAmelCase : Union[str, Any]=32 , UpperCAmelCase : List[str]=5 , UpperCAmelCase : Tuple=4 , UpperCAmelCase : Union[str, Any]=37 , UpperCAmelCase : int=8 , UpperCAmelCase : List[str]=0.1 , UpperCAmelCase : Any=0.0_0_2 , UpperCAmelCase : Optional[Any]=1 , UpperCAmelCase : List[Any]=0 , UpperCAmelCase : Union[str, Any]=0 , UpperCAmelCase : Tuple=None , UpperCAmelCase : Optional[Any]=None , ) -> Union[str, Any]:
lowerCamelCase__ : int = parent
lowerCamelCase__ : Any = batch_size
lowerCamelCase__ : Optional[int] = encoder_seq_length
lowerCamelCase__ : int = decoder_seq_length
# For common tests
lowerCamelCase__ : List[str] = self.decoder_seq_length
lowerCamelCase__ : Optional[int] = is_training
lowerCamelCase__ : List[Any] = use_attention_mask
lowerCamelCase__ : Optional[Any] = use_labels
lowerCamelCase__ : Union[str, Any] = vocab_size
lowerCamelCase__ : Union[str, Any] = hidden_size
lowerCamelCase__ : Optional[Any] = num_hidden_layers
lowerCamelCase__ : Any = num_attention_heads
lowerCamelCase__ : str = d_ff
lowerCamelCase__ : Optional[Any] = relative_attention_num_buckets
lowerCamelCase__ : Any = dropout_rate
lowerCamelCase__ : Any = initializer_factor
lowerCamelCase__ : Union[str, Any] = eos_token_id
lowerCamelCase__ : List[str] = pad_token_id
lowerCamelCase__ : List[str] = decoder_start_token_id
lowerCamelCase__ : List[Any] = None
lowerCamelCase__ : Optional[Any] = decoder_layers
def A_ ( self : List[Any] ) -> int:
return TaConfig.from_pretrained('google/umt5-base' )
def A_ ( self : List[Any] , UpperCAmelCase : str , UpperCAmelCase : Optional[int] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Tuple=None , UpperCAmelCase : List[str]=None , UpperCAmelCase : str=None , UpperCAmelCase : Optional[Any]=None , UpperCAmelCase : Optional[Any]=None , ) -> List[str]:
if attention_mask is None:
lowerCamelCase__ : Optional[Any] = input_ids.ne(config.pad_token_id )
if decoder_attention_mask is None:
lowerCamelCase__ : Optional[Any] = decoder_input_ids.ne(config.pad_token_id )
if head_mask is None:
lowerCamelCase__ : int = torch.ones(config.num_hidden_layers , config.num_attention_heads , device=UpperCAmelCase )
if decoder_head_mask is None:
lowerCamelCase__ : Dict = torch.ones(config.num_decoder_layers , config.num_attention_heads , device=UpperCAmelCase )
if cross_attn_head_mask is None:
lowerCamelCase__ : Dict = torch.ones(
config.num_decoder_layers , config.num_attention_heads , device=UpperCAmelCase )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
def A_ ( self : str ) -> List[str]:
lowerCamelCase__ : Any = ids_tensor([self.batch_size, self.encoder_seq_length] , self.vocab_size )
lowerCamelCase__ : Optional[Any] = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
# we need to clamp the input ids here to avoid having pad token in between
# this is because for NllbMoe the position_ids are prepared such that
# all pad tokens have pos id = 2 and rest are between 2..seq_length
# and the seq_length here is seq_length - num_pad_tokens
# but when using past, there is no way of knowing if the past input ids had
# pad tokens in them, which results in incorrect seq_lenth and which in turn results in
# position_ids being off by num_pad_tokens in past input
lowerCamelCase__ : List[str] = input_ids.clamp(self.pad_token_id + 1 )
lowerCamelCase__ : Union[str, Any] = decoder_input_ids.clamp(self.pad_token_id + 1 )
lowerCamelCase__ : Dict = self.get_config()
lowerCamelCase__ : Tuple = config.num_attention_heads
lowerCamelCase__ : Any = self.prepare_inputs_dict(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
return config, input_dict
def A_ ( self : Tuple ) -> Union[str, Any]:
lowerCamelCase__ , lowerCamelCase__ : Dict = self.prepare_config_and_inputs()
return config, inputs_dict
def A_ ( self : Optional[int] ) -> List[str]:
return TaConfig(
vocab_size=166 , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def A_ ( self : Union[str, Any] ) -> Dict:
return TaConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def A_ ( self : Any , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : List[str] , UpperCAmelCase : int , UpperCAmelCase : str , UpperCAmelCase : Any , UpperCAmelCase : Dict , ) -> str:
lowerCamelCase__ : Dict = UMTaModel(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
lowerCamelCase__ : Optional[int] = model(
input_ids=UpperCAmelCase , decoder_input_ids=UpperCAmelCase , attention_mask=UpperCAmelCase , decoder_attention_mask=UpperCAmelCase , )
lowerCamelCase__ : Any = model(input_ids=UpperCAmelCase , decoder_input_ids=UpperCAmelCase )
lowerCamelCase__ : Dict = result.last_hidden_state
lowerCamelCase__ : Any = result.past_key_values
lowerCamelCase__ : List[Any] = result.encoder_last_hidden_state
self.parent.assertEqual(encoder_output.size() , (self.batch_size, self.encoder_seq_length, self.hidden_size) )
self.parent.assertEqual(decoder_output.size() , (self.batch_size, self.decoder_seq_length, self.hidden_size) )
# There should be `num_layers` key value embeddings stored in decoder_past
self.parent.assertEqual(len(UpperCAmelCase ) , config.num_layers )
# There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
self.parent.assertEqual(len(decoder_past[0] ) , 4 )
def A_ ( self : Optional[int] , UpperCAmelCase : str , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : List[Any] , UpperCAmelCase : int , UpperCAmelCase : List[str] , UpperCAmelCase : Optional[Any] , ) -> Optional[int]:
lowerCamelCase__ : List[Any] = UMTaModel(config=UpperCAmelCase ).get_decoder().to(UpperCAmelCase ).eval()
# first forward pass
lowerCamelCase__ : Tuple = model(UpperCAmelCase , use_cache=UpperCAmelCase )
lowerCamelCase__ : List[Any] = model(UpperCAmelCase )
lowerCamelCase__ : int = model(UpperCAmelCase , use_cache=UpperCAmelCase )
self.parent.assertTrue(len(UpperCAmelCase ) == len(UpperCAmelCase ) )
self.parent.assertTrue(len(UpperCAmelCase ) == len(UpperCAmelCase ) + 1 )
lowerCamelCase__ , lowerCamelCase__ : Dict = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
lowerCamelCase__ : Optional[int] = ids_tensor((self.batch_size, 1) , config.vocab_size )
# append to next input_ids and
lowerCamelCase__ : List[Any] = torch.cat([input_ids, next_tokens] , dim=-1 )
lowerCamelCase__ : List[str] = model(UpperCAmelCase )['last_hidden_state']
lowerCamelCase__ : str = model(UpperCAmelCase , past_key_values=UpperCAmelCase )['last_hidden_state']
# select random slice
lowerCamelCase__ : List[str] = ids_tensor((1,) , output_from_past.shape[-1] ).item()
lowerCamelCase__ : Tuple = output_from_no_past[:, -1, random_slice_idx].detach()
lowerCamelCase__ : List[str] = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(UpperCAmelCase , UpperCAmelCase , atol=1e-3 ) )
def A_ ( self : Optional[int] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : List[str] , ) -> Tuple:
lowerCamelCase__ : Union[str, Any] = UMTaModel(config=UpperCAmelCase ).to(UpperCAmelCase ).half().eval()
lowerCamelCase__ : Optional[int] = model(**UpperCAmelCase )['last_hidden_state']
self.parent.assertFalse(torch.isnan(UpperCAmelCase ).any().item() )
@require_torch
class lowerCAmelCase ( __UpperCamelCase, __UpperCamelCase, __UpperCamelCase, unittest.TestCase ):
UpperCAmelCase__ = (
(UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else ()
)
UpperCAmelCase__ = (UMTaForConditionalGeneration,) if is_torch_available() else ()
UpperCAmelCase__ = (
{
"""conversational""": UMTaForConditionalGeneration,
"""feature-extraction""": UMTaModel,
"""summarization""": UMTaForConditionalGeneration,
"""text2text-generation""": UMTaForConditionalGeneration,
"""translation""": UMTaForConditionalGeneration,
"""question-answering""": UMTaForQuestionAnswering,
}
if is_torch_available()
else {}
)
UpperCAmelCase__ = True
UpperCAmelCase__ = False
UpperCAmelCase__ = False
UpperCAmelCase__ = True
UpperCAmelCase__ = True
# The small UMT5 model needs higher percentages for CPU/MP tests
UpperCAmelCase__ = [0.8, 0.9]
def A_ ( self : Union[str, Any] ) -> List[Any]:
lowerCamelCase__ : Union[str, Any] = UMTaModelTester(self )
@unittest.skip('Test has a segmentation fault on torch 1.8.0' )
def A_ ( self : Tuple ) -> int:
lowerCamelCase__ : str = self.model_tester.prepare_config_and_inputs()
lowerCamelCase__ : Tuple = UMTaModel(config_and_inputs[0] ).to(UpperCAmelCase )
with tempfile.TemporaryDirectory() as tmpdirname:
torch.onnx.export(
UpperCAmelCase , (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]) , F"""{tmpdirname}/t5_test.onnx""" , export_params=UpperCAmelCase , opset_version=9 , input_names=['input_ids', 'decoder_input_ids'] , )
@unittest.skipIf(torch_device == 'cpu' , 'Cant do half precision' )
def A_ ( self : Tuple ) -> Optional[Any]:
lowerCamelCase__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model_fpaa_forward(*UpperCAmelCase )
def A_ ( self : List[Any] ) -> str:
lowerCamelCase__ : int = ['encoder_attentions', 'decoder_attentions', 'cross_attentions']
lowerCamelCase__ : str = self.model_tester.prepare_config_and_inputs()
lowerCamelCase__ : Any = config_and_inputs[0]
lowerCamelCase__ : Any = UMTaForConditionalGeneration(UpperCAmelCase ).eval()
model.to(UpperCAmelCase )
lowerCamelCase__ : Tuple = {
'head_mask': torch.zeros(config.num_layers , config.num_heads , device=UpperCAmelCase ),
'decoder_head_mask': torch.zeros(config.num_decoder_layers , config.num_heads , device=UpperCAmelCase ),
'cross_attn_head_mask': torch.zeros(config.num_decoder_layers , config.num_heads , device=UpperCAmelCase ),
}
for attn_name, (name, mask) in zip(UpperCAmelCase , head_masking.items() ):
lowerCamelCase__ : Union[str, Any] = {name: mask}
# Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
if name == "head_mask":
lowerCamelCase__ : Union[str, Any] = torch.ones(
config.num_decoder_layers , config.num_heads , device=UpperCAmelCase )
lowerCamelCase__ : Tuple = model.generate(
config_and_inputs[1]['input_ids'] , num_beams=1 , max_length=3 , output_attentions=UpperCAmelCase , return_dict_in_generate=UpperCAmelCase , **UpperCAmelCase , )
# We check the state of decoder_attentions and cross_attentions just from the last step
lowerCamelCase__ : Union[str, Any] = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
self.assertEqual(sum([w.sum().item() for w in attn_weights] ) , 0.0 )
@unittest.skip('Does not work on the tiny model as we keep hitting edge cases.' )
def A_ ( self : Optional[Any] ) -> Optional[Any]:
pass
@require_torch
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase ( unittest.TestCase ):
@slow
@unittest.skip(
'Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged' )
def A_ ( self : Any ) -> int:
lowerCamelCase__ : Optional[Any] = UMTaForConditionalGeneration.from_pretrained('google/umt5-small' , return_dict=UpperCAmelCase ).to(UpperCAmelCase )
lowerCamelCase__ : List[str] = AutoTokenizer.from_pretrained('google/umt5-small' , use_fast=UpperCAmelCase , legacy=UpperCAmelCase )
lowerCamelCase__ : Dict = [
'Bonjour monsieur <extra_id_0> bien <extra_id_1>.',
'No se como puedo <extra_id_0>.',
'This is the reason why we <extra_id_0> them.',
'The <extra_id_0> walks in <extra_id_1>, seats',
'A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.',
]
lowerCamelCase__ : Tuple = tokenizer(UpperCAmelCase , return_tensors='pt' , padding=UpperCAmelCase ).input_ids
# fmt: off
lowerCamelCase__ : Any = torch.tensor(
[
[ 38530, 210703, 256299, 1410, 256298, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 826, 321, 671, 25922, 256299, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 1460, 339, 312, 19014, 10620, 758, 256299, 2355,274, 1, 0, 0, 0, 0, 0, 0,0, 0],
[ 517, 256299, 14869, 281, 301, 256298, 275, 119983,1, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 320, 256299, 14869, 281, 2234, 289, 2275, 333,61391, 289, 256298, 543, 256297, 168714, 329, 256296,274, 1],
] )
# fmt: on
torch.testing.assert_allclose(UpperCAmelCase , UpperCAmelCase )
lowerCamelCase__ : Optional[int] = model.generate(input_ids.to(UpperCAmelCase ) )
lowerCamelCase__ : List[Any] = [
'<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>',
'<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
'<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
'<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
'<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
]
lowerCamelCase__ : Union[str, Any] = tokenizer.batch_decode(UpperCAmelCase )
self.assertEqual(UpperCAmelCase , UpperCAmelCase )
| 50 |
import unittest
from transformers import BertGenerationTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
_UpperCAmelCase : Optional[Any] = """▁"""
_UpperCAmelCase : Optional[Any] = get_tests_dir("""fixtures/test_sentencepiece.model""")
@require_sentencepiece
class lowerCAmelCase ( __UpperCamelCase, unittest.TestCase ):
UpperCAmelCase__ = BertGenerationTokenizer
UpperCAmelCase__ = False
UpperCAmelCase__ = True
def A_ ( self : List[Any] ) -> List[str]:
super().setUp()
        tokenizer = BertGenerationTokenizer(SAMPLE_VOCAB , keep_accents=True )
tokenizer.save_pretrained(self.tmpdirname )
def A_ ( self : Optional[Any] ) -> Dict:
        token = '<s>'
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )
def A_ ( self : List[str] ) -> Optional[int]:
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , '<unk>' )
        self.assertEqual(vocab_keys[1] , '<s>' )
        self.assertEqual(vocab_keys[-1] , '<pad>' )
        self.assertEqual(len(vocab_keys ) , 1002 )
def A_ ( self : List[Any] ) -> Optional[Any]:
self.assertEqual(self.get_tokenizer().vocab_size , 1000 )
def A_ ( self : Union[str, Any] ) -> List[Any]:
        tokenizer = BertGenerationTokenizer(SAMPLE_VOCAB , keep_accents=True )
        tokens = tokenizer.tokenize('This is a test' )
        self.assertListEqual(tokens , ['▁This', '▁is', '▁a', '▁t', 'est'] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens ) , [285, 46, 10, 170, 382] , )
        tokens = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
        self.assertListEqual(
            tokens , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens )
        self.assertListEqual(
            ids , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids )
        self.assertListEqual(
            back_tokens , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
@cached_property
def A_ ( self : Dict ) -> Tuple:
return BertGenerationTokenizer.from_pretrained('google/bert_for_seq_generation_L-24_bbc_encoder' )
@slow
def A_ ( self : Optional[int] ) -> List[str]:
        symbols = 'Hello World!'
        original_tokenizer_encodings = [18536, 2260, 101]
        self.assertListEqual(original_tokenizer_encodings , self.big_tokenizer.encode(symbols ) )
@slow
def A_ ( self : Optional[Any] ) -> str:
        symbols = (
'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'
)
        original_tokenizer_encodings = [
871,
419,
358,
946,
991,
2521,
452,
358,
1357,
387,
7751,
3536,
112,
985,
456,
126,
865,
938,
5400,
5734,
458,
1368,
467,
786,
2462,
5246,
1159,
633,
865,
4519,
457,
582,
852,
2557,
427,
916,
508,
405,
34324,
497,
391,
408,
11342,
1244,
385,
100,
938,
985,
456,
574,
362,
12597,
3200,
3129,
1172,
]
        self.assertListEqual(original_tokenizer_encodings , self.big_tokenizer.encode(symbols ) )
@require_torch
@slow
def A_ ( self : int ) -> Optional[Any]:
import torch
from transformers import BertGenerationConfig, BertGenerationEncoder
# Build sequence
        first_ten_tokens = list(self.big_tokenizer.get_vocab().keys() )[:10]
        sequence = ' '.join(first_ten_tokens )
        encoded_sequence = self.big_tokenizer.encode_plus(sequence , return_tensors='pt' , return_token_type_ids=False )
        batch_encoded_sequence = self.big_tokenizer.batch_encode_plus(
            [sequence + ' ' + sequence] , return_tensors='pt' , return_token_type_ids=False )
        config = BertGenerationConfig()
        model = BertGenerationEncoder(config )
        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
        with torch.no_grad():
            model(**encoded_sequence )
            model(**batch_encoded_sequence )
@slow
def A_ ( self : Optional[int] ) -> List[Any]:
# fmt: off
lowerCamelCase__ : Any = {'input_ids': [[39286, 458, 36335, 2001, 456, 13073, 13266, 455, 113, 7746, 1741, 11157, 391, 13073, 13266, 455, 113, 3967, 35412, 113, 4936, 109, 3870, 2377, 113, 30084, 45720, 458, 134, 17496, 112, 503, 11672, 113, 118, 112, 5665, 13347, 38687, 112, 1496, 31389, 112, 3268, 47264, 134, 962, 112, 16377, 8035, 23130, 430, 12169, 15518, 28592, 458, 146, 41697, 109, 391, 12169, 15518, 16689, 458, 146, 41358, 109, 452, 726, 4034, 111, 763, 35412, 5082, 388, 1903, 111, 9051, 391, 2870, 48918, 1900, 1123, 550, 998, 112, 9586, 15985, 455, 391, 410, 22955, 37636, 114], [448, 17496, 419, 3663, 385, 763, 113, 27533, 2870, 3283, 13043, 1639, 24713, 523, 656, 24013, 18550, 2521, 517, 27014, 21244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 11786, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [484, 2169, 7687, 21932, 18146, 726, 363, 17032, 3391, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=UpperCAmelCase , model_name='google/bert_for_seq_generation_L-24_bbc_encoder' , revision='c817d1fd1be2ffa69431227a1fe320544943d4db' , )
| 50 | 1 |
import argparse
import os
import torch
from transformers.utils import WEIGHTS_NAME
_lowercase: Any = ["small", "medium", "large"]
_lowercase: Tuple = "lm_head.decoder.weight"
_lowercase: List[Any] = "lm_head.weight"
def a( A : str , A : str ) -> Dict:
"""simple docstring"""
a = torch.load(A )
a = d.pop(A )
os.makedirs(A , exist_ok=A )
torch.save(A , os.path.join(A , A ) )
if __name__ == "__main__":
_lowercase: Dict = argparse.ArgumentParser()
parser.add_argument("--dialogpt_path", default=".", type=str)
_lowercase: int = parser.parse_args()
for MODEL in DIALOGPT_MODELS:
_lowercase: List[Any] = os.path.join(args.dialogpt_path, F"""{MODEL}_ft.pkl""")
_lowercase: List[Any] = F"""./DialoGPT-{MODEL}"""
convert_dialogpt_checkpoint(
checkpoint_path,
pytorch_dump_folder_path,
)
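# Example invocation (added illustration; the script filename below is an
# assumption, not taken from the repository):
#     python convert_dialogpt_checkpoint.py --dialogpt_path ./dialogpt_checkpoints
# For each of small/medium/large it loads {size}_ft.pkl, renames
# "lm_head.decoder.weight" to "lm_head.weight", and writes the result to
# ./DialoGPT-{size}/pytorch_model.bin (WEIGHTS_NAME).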
| 71 |
import random
import timeit
from functools import wraps
from typing import Callable, Optional
from ..configuration_utils import PretrainedConfig
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import is_pyanvml_available, is_tf_available, logging
from .benchmark_utils import (
Benchmark,
Memory,
MemorySummary,
measure_peak_memory_cpu,
start_memory_tracing,
stop_memory_tracing,
)
if is_tf_available():
import tensorflow as tf
from tensorflow.python.framework.errors_impl import ResourceExhaustedError
from .benchmark_args_tf import TensorFlowBenchmarkArguments
if is_pyanvml_available():
import pyanvml.pyanvml as nvml
_lowercase: Any = logging.get_logger(__name__)
def run_with_tf_optimizations(do_eager_mode: bool, use_xla: bool):
    """simple docstring"""
    def run_func(func: Callable):
        @wraps(func)
        def run_in_eager_mode(*args, **kwargs):
            return func(*args, **kwargs)
        @wraps(func)
        @tf.function(experimental_compile=use_xla)
        def run_in_graph_mode(*args, **kwargs):
            return func(*args, **kwargs)
        if do_eager_mode is True:
            if use_xla is not False:
                raise ValueError(
                    "Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`." )
            return run_in_eager_mode
        else:
            return run_in_graph_mode
    return run_func
def a( A : int , A : int , A : int ) -> ["tf.Tensor"]:
"""simple docstring"""
a = random.Random()
a = [rng.randint(0 , vocab_size - 1 ) for i in range(batch_size * sequence_length )]
return tf.constant(A , shape=(batch_size, sequence_length) , dtype=tf.intaa )
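# Quick sanity sketch (added; not in the original file): random_input_ids builds
# a (batch_size, sequence_length) int32 tensor of token ids drawn uniformly from
# the vocabulary, which the benchmark then feeds to the model under test, e.g.
#     ids = random_input_ids(batch_size=2, sequence_length=8, vocab_size=30522)
#     assert ids.shape == (2, 8)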
class TensorFlowBenchmark(Benchmark):
    """simple docstring"""
    args: TensorFlowBenchmarkArguments
    configs: PretrainedConfig
    framework: str = "TensorFlow"
@property
def UpperCamelCase_ (self ):
"""simple docstring"""
return tf.__version__
def UpperCamelCase_ (self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
a = self.args.strategy
if strategy is None:
raise ValueError("A device strategy has to be initialized before using TensorFlow." )
a = self._prepare_inference_func(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
return self._measure_speed(_inference )
def UpperCamelCase_ (self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
a = self.args.strategy
if strategy is None:
raise ValueError("A device strategy has to be initialized before using TensorFlow." )
a = self._prepare_train_func(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
return self._measure_speed(_train )
def UpperCamelCase_ (self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
if self.args.is_gpu:
tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , lowerCamelCase_ )
a = self.args.strategy
if strategy is None:
raise ValueError("A device strategy has to be initialized before using TensorFlow." )
a = self._prepare_inference_func(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
return self._measure_memory(_inference )
def UpperCamelCase_ (self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
if self.args.is_gpu:
tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , lowerCamelCase_ )
a = self.args.strategy
if strategy is None:
raise ValueError("A device strategy has to be initialized before using TensorFlow." )
a = self._prepare_train_func(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
return self._measure_memory(_train )
def UpperCamelCase_ (self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
a = self.config_dict[model_name]
if self.args.fpaa:
raise NotImplementedError("Mixed precision is currently not supported." )
a = (
hasattr(lowerCamelCase_ , "architectures" )
and isinstance(config.architectures , lowerCamelCase_ )
and len(config.architectures ) > 0
)
if not self.args.only_pretrain_model and has_model_class_in_config:
try:
a = "TF" + config.architectures[0] # prepend 'TF' for tensorflow model
a = __import__("transformers" , fromlist=[model_class] )
a = getattr(lowerCamelCase_ , lowerCamelCase_ )
a = model_cls(lowerCamelCase_ )
except ImportError:
raise ImportError(
F'''{model_class} does not exist. If you just want to test the pretrained model, you might want to'''
" set `--only_pretrain_model` or `args.only_pretrain_model=True`." )
else:
a = TF_MODEL_MAPPING[config.__class__](lowerCamelCase_ )
# encoder-decoder has vocab size saved differently
a = config.vocab_size if hasattr(lowerCamelCase_ , "vocab_size" ) else config.encoder.vocab_size
a = random_input_ids(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_decoder_forward():
return model(lowerCamelCase_ , decoder_input_ids=lowerCamelCase_ , training=lowerCamelCase_ )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_forward():
return model(lowerCamelCase_ , training=lowerCamelCase_ )
a = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward
return _inference
def UpperCamelCase_ (self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
a = self.config_dict[model_name]
if self.args.eager_mode is not False:
raise ValueError("Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`." )
if self.args.fpaa:
raise NotImplementedError("Mixed precision is currently not supported." )
a = (
hasattr(lowerCamelCase_ , "architectures" )
and isinstance(config.architectures , lowerCamelCase_ )
and len(config.architectures ) > 0
)
if not self.args.only_pretrain_model and has_model_class_in_config:
try:
a = "TF" + config.architectures[0] # prepend 'TF' for tensorflow model
a = __import__("transformers" , fromlist=[model_class] )
a = getattr(lowerCamelCase_ , lowerCamelCase_ )
a = model_cls(lowerCamelCase_ )
except ImportError:
raise ImportError(
F'''{model_class} does not exist. If you just want to test the pretrained model, you might want to'''
" set `--only_pretrain_model` or `args.only_pretrain_model=True`." )
else:
a = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](lowerCamelCase_ )
# encoder-decoder has vocab size saved differently
a = config.vocab_size if hasattr(lowerCamelCase_ , "vocab_size" ) else config.encoder.vocab_size
a = random_input_ids(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_decoder_train():
a = model(lowerCamelCase_ , decoder_input_ids=lowerCamelCase_ , labels=lowerCamelCase_ , training=lowerCamelCase_ )[0]
a = tf.gradients(lowerCamelCase_ , model.trainable_variables )
return gradients
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_train():
a = model(lowerCamelCase_ , labels=lowerCamelCase_ , training=lowerCamelCase_ )[0]
a = tf.gradients(lowerCamelCase_ , model.trainable_variables )
return gradients
a = encoder_decoder_train if config.is_encoder_decoder else encoder_train
return _train
def UpperCamelCase_ (self , lowerCamelCase_ ):
"""simple docstring"""
with self.args.strategy.scope():
try:
if self.args.is_tpu or self.args.use_xla:
# run additional 10 times to stabilize compilation for tpu
logger.info("Do inference on TPU. Running model 5 times to stabilize compilation" )
timeit.repeat(lowerCamelCase_ , repeat=1 , number=5 )
# as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average
a = timeit.repeat(
lowerCamelCase_ , repeat=self.args.repeat , number=10 , )
return min(lowerCamelCase_ ) / 10.0
except ResourceExhaustedError as e:
self.print_fn(F'''Doesn\'t fit on GPU. {e}''' )
def UpperCamelCase_ (self , lowerCamelCase_ ):
"""simple docstring"""
logger.info(
"Note that TensorFlow allocates more memory than "
"it might need to speed up computation. "
"The memory reported here corresponds to the memory "
"reported by `nvidia-smi`, which can vary depending "
"on total available memory on the GPU that is used." )
with self.args.strategy.scope():
try:
if self.args.trace_memory_line_by_line:
if not self.args.eager_mode:
raise ValueError(
"`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory"
" consumption line by line." )
a = start_memory_tracing("transformers" )
if self.args.is_tpu:
# tpu
raise NotImplementedError(
"Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking"
" with `args.memory=False`" )
elif self.args.is_gpu:
# gpu
if not is_pyanvml_available():
logger.warning(
"py3nvml not installed, we won't log GPU memory usage. "
"Install py3nvml (pip install py3nvml) to log information about GPU." )
a = "N/A"
else:
logger.info(
"Measuring total GPU usage on GPU device. Make sure to not have additional processes"
" running on the same GPU." )
# init nvml
nvml.nvmlInit()
func()
a = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx )
a = nvml.nvmlDeviceGetMemoryInfo(lowerCamelCase_ )
a = meminfo.used
a = Memory(lowerCamelCase_ )
# shutdown nvml
nvml.nvmlShutdown()
else:
# cpu
if self.args.trace_memory_line_by_line:
logger.info(
"When enabling line by line tracing, the max peak memory for CPU is inaccurate in"
" TensorFlow." )
a = None
else:
a = measure_peak_memory_cpu(lowerCamelCase_ )
a = Memory(lowerCamelCase_ ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else memory_bytes
if self.args.trace_memory_line_by_line:
a = stop_memory_tracing(lowerCamelCase_ )
if memory is None:
a = summary.total
else:
a = None
return memory, summary
except ResourceExhaustedError as e:
self.print_fn(F'''Doesn\'t fit on GPU. {e}''' )
return "N/A", None
| 71 | 1 |
"""simple docstring"""
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFInpaintingSuperResolutionPipelineFastTests(PipelineTesterMixin , IFPipelineTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    pipeline_class = IFInpaintingSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"""width""", """height"""}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({"""original_image"""} )
    required_optional_params = PipelineTesterMixin.required_optional_params - {"""latents"""}
    def get_dummy_components( self ):
        """simple docstring"""
        return self._get_superresolution_dummy_components()
    def get_dummy_inputs( self , device , seed=0 ):
        """simple docstring"""
        if str(device ).startswith('mps' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        image = floats_tensor((1, 3, 1_6, 1_6) , rng=random.Random(seed ) ).to(device )
        original_image = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(seed ) ).to(device )
        mask_image = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(seed ) ).to(device )
        inputs = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'original_image': original_image,
'mask_image': mask_image,
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
    def test_xformers_attention_forwardGenerator_pass( self ):
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
    def test_save_load_optional_components( self ):
"""simple docstring"""
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != 'cuda' , reason='float16 requires CUDA' )
    def test_save_load_float16( self ):
        """simple docstring"""
        super().test_save_load_float16(expected_max_diff=1E-1 )
    def test_attention_slicing_forward_pass( self ):
"""simple docstring"""
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
    def test_save_load_local( self ):
"""simple docstring"""
self._test_save_load_local()
    def test_inference_batch_single_identical( self ):
"""simple docstring"""
self._test_inference_batch_single_identical(
            expected_max_diff=1E-2 , )
| 286 |
"""simple docstring"""
import inspect
import jax
import jax.lax as lax
import jax.numpy as jnp
from ..utils import add_start_docstrings
from ..utils.logging import get_logger
logger = get_logger(__name__)
LOGITS_PROCESSOR_INPUTS_DOCSTRING = r'\n    Args:\n        input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):\n            Indices of input sequence tokens in the vocabulary.\n\n            Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n            [`PreTrainedTokenizer.__call__`] for details.\n\n            [What are input IDs?](../glossary#input-ids)\n        scores (`jnp.ndarray` of shape `(batch_size, config.vocab_size)`):\n            Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam\n            search or log softmax for each vocabulary token when using beam search\n        kwargs (`Dict[str, Any]`, *optional*):\n            Additional logits processor specific kwargs.\n\n    Return:\n        `jnp.ndarray` of shape `(batch_size, config.vocab_size)`: The processed prediction scores.\n\n'
class FlaxLogitsProcessor:
    '''simple docstring'''
    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING )
    def __call__( self , input_ids , scores ):
        """simple docstring"""
        raise NotImplementedError(
            F"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" )
class FlaxLogitsWarper:
    '''simple docstring'''
    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING )
    def __call__( self , input_ids , scores ):
        """simple docstring"""
        raise NotImplementedError(
            F"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" )
class FlaxLogitsProcessorList(list):
    '''simple docstring'''
    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING )
    def __call__( self , input_ids , scores , cur_len , **kwargs ):
        """simple docstring"""
        for processor in self:
            function_args = inspect.signature(processor.__call__ ).parameters
            if len(function_args ) > 3:
                if not all(arg in kwargs for arg in list(function_args.keys() )[2:] ):
                    raise ValueError(
                        F"""Make sure that all the required parameters: {list(function_args.keys() )} for """
                        F"""{processor.__class__} are passed to the logits processor.""" )
                scores = processor(input_ids , scores , cur_len , **kwargs )
            else:
                scores = processor(input_ids , scores , cur_len )
        return scores
class FlaxTemperatureLogitsWarper(FlaxLogitsWarper):
    '''simple docstring'''
    def __init__( self , temperature ):
        """simple docstring"""
        if not isinstance(temperature , float ) or not (temperature > 0):
            raise ValueError(F"""`temperature` has to be a strictly positive float, but is {temperature}""" )
        self.temperature = temperature
    def __call__( self , input_ids , scores , cur_len ):
        """simple docstring"""
        scores = scores / self.temperature
        return scores
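# Added numeric illustration (assumption: this warper is applied to logits inside
# a Flax generation loop). Dividing scores by a temperature > 1 flattens the
# softmax, while a temperature < 1 sharpens it:
#     jax.nn.softmax(jnp.array([2.0, 0.0]))        # ~[0.88, 0.12]
#     jax.nn.softmax(jnp.array([2.0, 0.0]) / 2.0)  # ~[0.73, 0.27]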
class FlaxTopPLogitsWarper(FlaxLogitsWarper):
    '''simple docstring'''
    def __init__( self , top_p , filter_value = -float('Inf' ) , min_tokens_to_keep = 1 ):
        """simple docstring"""
        if not isinstance(top_p , float ) or (top_p < 0 or top_p > 1.0):
            raise ValueError(F"""`top_p` has to be a float > 0 and < 1, but is {top_p}""" )
        if not isinstance(min_tokens_to_keep , int ) or (min_tokens_to_keep < 1):
            raise ValueError(F"""`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}""" )
        self.top_p = top_p
        self.filter_value = filter_value
        self.min_tokens_to_keep = min_tokens_to_keep
    def __call__( self , input_ids , scores , cur_len ):
        """simple docstring"""
        topk_scores , topk_indices = lax.top_k(scores , scores.shape[-1] )
        mask_scores = jnp.full_like(scores , self.filter_value )
        cumulative_probs = jax.nn.softmax(topk_scores , axis=-1 ).cumsum(axis=-1 )
        score_mask = cumulative_probs < self.top_p
        # include the token that is higher than top_p as well
        score_mask = jnp.roll(score_mask , 1 )
        score_mask |= score_mask.at[:, 0].set(True )
        # min tokens to keep
        score_mask = score_mask.at[:, : self.min_tokens_to_keep].set(True )
        topk_next_scores = jnp.where(score_mask , topk_scores , mask_scores )
        next_scores = jax.lax.sort_key_val(topk_indices , topk_next_scores )[-1]
        return next_scores
class FlaxTopKLogitsWarper(FlaxLogitsWarper):
    '''simple docstring'''
    def __init__( self , top_k , filter_value = -float('Inf' ) , min_tokens_to_keep = 1 ):
        """simple docstring"""
        if not isinstance(top_k , int ) or top_k <= 0:
            raise ValueError(F"""`top_k` has to be a strictly positive integer, but is {top_k}""" )
        self.top_k = max(top_k , min_tokens_to_keep )
        self.filter_value = filter_value
    def __call__( self , input_ids , scores , cur_len ):
        """simple docstring"""
        batch_size , vocab_size = scores.shape
        next_scores_flat = jnp.full(batch_size * vocab_size , self.filter_value )
        topk = min(self.top_k , scores.shape[-1] )  # Safety check
        topk_scores , topk_indices = lax.top_k(scores , topk )
        shift = jnp.broadcast_to((jnp.arange(batch_size ) * vocab_size)[:, None] , (batch_size, topk) ).flatten()
        topk_scores_flat = topk_scores.flatten()
        topk_indices_flat = topk_indices.flatten() + shift
        next_scores_flat = next_scores_flat.at[topk_indices_flat].set(topk_scores_flat )
        next_scores = next_scores_flat.reshape(batch_size , vocab_size )
        return next_scores
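# Added note on the implementation above (illustrative): lax.top_k runs on the
# (batch, vocab) scores, then per-row offsets (row_index * vocab_size) turn the
# top-k indices into positions in one flat (batch * vocab,) buffer, so a single
# scatter (`at[...].set`) restores all surviving scores before reshaping back to
# (batch, vocab); every other position stays at filter_value.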
class FlaxForcedBOSTokenLogitsProcessor(FlaxLogitsProcessor):
    '''simple docstring'''
    def __init__( self , bos_token_id ):
        """simple docstring"""
        self.bos_token_id = bos_token_id
    def __call__( self , input_ids , scores , cur_len ):
        """simple docstring"""
        new_scores = jnp.full(scores.shape , -float('inf' ) )
        apply_penalty = 1 - jnp.bool_(cur_len - 1 )
        scores = jnp.where(apply_penalty , new_scores.at[:, self.bos_token_id].set(0 ) , scores )
        return scores
class FlaxForcedEOSTokenLogitsProcessor(FlaxLogitsProcessor):
    '''simple docstring'''
    def __init__( self , max_length , eos_token_id ):
        """simple docstring"""
        self.max_length = max_length
        self.eos_token_id = eos_token_id
    def __call__( self , input_ids , scores , cur_len ):
        """simple docstring"""
        new_scores = jnp.full(scores.shape , -float('inf' ) )
        apply_penalty = 1 - jnp.bool_(cur_len - self.max_length + 1 )
        scores = jnp.where(apply_penalty , new_scores.at[:, self.eos_token_id].set(0 ) , scores )
        return scores
class FlaxMinLengthLogitsProcessor(FlaxLogitsProcessor):
    '''simple docstring'''
    def __init__( self , min_length , eos_token_id ):
        """simple docstring"""
        if not isinstance(min_length , int ) or min_length < 0:
            raise ValueError(F"""`min_length` has to be a positive integer, but is {min_length}""" )
        if not isinstance(eos_token_id , int ) or eos_token_id < 0:
            raise ValueError(F"""`eos_token_id` has to be a positive integer, but is {eos_token_id}""" )
        self.min_length = min_length
        self.eos_token_id = eos_token_id
    def __call__( self , input_ids , scores , cur_len ):
        """simple docstring"""
        # create boolean flag to decide if min length penalty should be applied
        apply_penalty = 1 - jnp.clip(cur_len - self.min_length , 0 , 1 )
        scores = jnp.where(apply_penalty , scores.at[:, self.eos_token_id].set(-float('inf' ) ) , scores )
        return scores
class FlaxSuppressTokensAtBeginLogitsProcessor(FlaxLogitsProcessor):
    '''simple docstring'''
    def __init__( self , begin_suppress_tokens , begin_index ):
        """simple docstring"""
        self.begin_suppress_tokens = list(begin_suppress_tokens )
        self.begin_index = begin_index
    def __call__( self , input_ids , scores , cur_len ):
        """simple docstring"""
        apply_penalty = 1 - jnp.bool_(cur_len - self.begin_index )
        scores = jnp.where(apply_penalty , scores.at[:, self.begin_suppress_tokens].set(-float('inf' ) ) , scores )
        return scores
class FlaxSuppressTokensLogitsProcessor(FlaxLogitsProcessor):
    '''simple docstring'''
    def __init__( self , suppress_tokens ):
        """simple docstring"""
        self.suppress_tokens = list(suppress_tokens )
    def __call__( self , input_ids , scores , cur_len ):
        """simple docstring"""
        scores = scores.at[..., self.suppress_tokens].set(-float('inf' ) )
        return scores
class FlaxForceTokensLogitsProcessor(FlaxLogitsProcessor):
    '''simple docstring'''
    def __init__( self , force_token_map ):
        """simple docstring"""
        force_token_map = dict(force_token_map )
        # Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the
        # index of the array corresponds to the index of the token to be forced, for XLA compatibility.
        # Indexes without forced tokens will have a negative value.
        force_token_array = jnp.ones((max(force_token_map.keys() ) + 1) , dtype=jnp.int32 ) * -1
        for index, token in force_token_map.items():
            if token is not None:
                force_token_array = force_token_array.at[index].set(token )
        self.force_token_array = jnp.int32(force_token_array )
    def __call__( self , input_ids , scores , cur_len ):
        """simple docstring"""
        def _force_token(generation_idx ):
            batch_size = scores.shape[0]
            current_token = self.force_token_array[generation_idx]
            new_scores = jnp.ones_like(scores , dtype=scores.dtype ) * -float('inf' )
            updates = jnp.zeros((batch_size, 1) , dtype=scores.dtype )
            new_scores = lax.dynamic_update_slice(new_scores , updates , (0, current_token) )
            return new_scores
        scores = lax.cond(
            cur_len >= self.force_token_array.shape[0] , lambda: scores , lambda: lax.cond(
                self.force_token_array[cur_len] >= 0 , lambda: _force_token(cur_len ) , lambda: scores , ) , )
        return scores
class FlaxWhisperTimeStampLogitsProcessor(FlaxLogitsProcessor):
    '''simple docstring'''
    def __init__( self , generate_config , model_config , decoder_input_length ):
        """simple docstring"""
        self.eos_token_id = generate_config.eos_token_id
        self.no_timestamps_token_id = generate_config.no_timestamps_token_id
        self.timestamp_begin = generate_config.no_timestamps_token_id + 1
        self.begin_index = decoder_input_length + 1
        if generate_config.is_multilingual:
            # room for language token and task token
            self.begin_index += 2
        if hasattr(generate_config , 'max_initial_timestamp_index' ):
            self.max_initial_timestamp_index = generate_config.max_initial_timestamp_index
        else:
            self.max_initial_timestamp_index = model_config.vocab_size
        if self.max_initial_timestamp_index is None:
            self.max_initial_timestamp_index = model_config.vocab_size
    def __call__( self , input_ids , scores , cur_len ):
        """simple docstring"""
        # suppress <|notimestamps|>, which is handled by `without_timestamps`
        scores = scores.at[:, self.no_timestamps_token_id].set(-float('inf' ) )
        def handle_pairs(input_ids_k , scores_k ):
            last_was_timestamp = jnp.where((cur_len - self.begin_index) >= 1 , True , False )
            last_was_timestamp = jnp.where(
                input_ids_k[cur_len - 1] >= self.timestamp_begin , True and last_was_timestamp , False , )
            penultimate_was_timestamp = jnp.where((cur_len - self.begin_index) < 2 , True , False )
            penultimate_was_timestamp = jnp.where(
                input_ids_k[cur_len - 2] >= self.timestamp_begin , True , penultimate_was_timestamp , )
            return jnp.where(
                last_was_timestamp , jnp.where(
                    penultimate_was_timestamp > 0 , scores_k.at[self.timestamp_begin :].set(-float('inf' ) ) , scores_k.at[: self.eos_token_id].set(-float('inf' ) ) , ) , scores_k , )
        scores = jax.vmap(handle_pairs )(input_ids , scores )
        apply_max_initial_timestamp = jnp.where(cur_len == self.begin_index , True , False )
        apply_max_initial_timestamp = jnp.where(
            self.max_initial_timestamp_index is not None , True and apply_max_initial_timestamp , False , )
        last_allowed = self.timestamp_begin + self.max_initial_timestamp_index
        scores = jnp.where(
            apply_max_initial_timestamp , scores.at[:, last_allowed + 1 :].set(-float('inf' ) ) , scores , )
        # if sum of probability over timestamps is above any other token, sample timestamp
        logprobs = jax.nn.log_softmax(scores , axis=-1 )
        def handle_cumulative_probs(logprobs_k , scores_k ):
            timestamp_logprob = jax.nn.logsumexp(logprobs_k[self.timestamp_begin :] , axis=-1 )
            max_text_token_logprob = jnp.max(logprobs_k[: self.timestamp_begin] )
            return jnp.where(
                timestamp_logprob > max_text_token_logprob , scores_k.at[: self.timestamp_begin].set(-float('inf' ) ) , scores_k , )
        scores = jax.vmap(handle_cumulative_probs )(logprobs , scores )
        return scores
| 286 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {
'''facebook/xmod-base''': '''https://huggingface.co/facebook/xmod-base/resolve/main/config.json''',
'''facebook/xmod-large-prenorm''': '''https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json''',
'''facebook/xmod-base-13-125k''': '''https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json''',
'''facebook/xmod-base-30-125k''': '''https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json''',
'''facebook/xmod-base-30-195k''': '''https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json''',
'''facebook/xmod-base-60-125k''': '''https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json''',
'''facebook/xmod-base-60-265k''': '''https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json''',
'''facebook/xmod-base-75-125k''': '''https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json''',
'''facebook/xmod-base-75-269k''': '''https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json''',
}
class XmodConfig(PretrainedConfig):
    '''simple docstring'''
    model_type = '''xmod'''
    def __init__( self ,vocab_size=3_0522 ,hidden_size=768 ,num_hidden_layers=12 ,num_attention_heads=12 ,intermediate_size=3072 ,hidden_act="gelu" ,hidden_dropout_prob=0.1 ,attention_probs_dropout_prob=0.1 ,max_position_embeddings=512 ,type_vocab_size=2 ,initializer_range=0.0_2 ,layer_norm_eps=1E-12 ,pad_token_id=1 ,bos_token_id=0 ,eos_token_id=2 ,position_embedding_type="absolute" ,use_cache=True ,classifier_dropout=None ,pre_norm=False ,adapter_reduction_factor=2 ,adapter_layer_norm=False ,adapter_reuse_layer_norm=True ,ln_before_adapter=True ,languages=("en_XX",) ,default_language=None ,**kwargs ,):
        super().__init__(pad_token_id=pad_token_id ,bos_token_id=bos_token_id ,eos_token_id=eos_token_id ,**kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        self.pre_norm = pre_norm
        self.adapter_reduction_factor = adapter_reduction_factor
        self.adapter_layer_norm = adapter_layer_norm
        self.adapter_reuse_layer_norm = adapter_reuse_layer_norm
        self.ln_before_adapter = ln_before_adapter
        self.languages = list(languages )
        self.default_language = default_language
class XmodOnnxConfig(OnnxConfig):
    '''simple docstring'''
    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: """batch""", 1: """choice""", 2: """sequence"""}
        else:
            dynamic_axis = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
] )
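# Hedged usage sketch (added; the OnnxConfig constructor arguments follow the
# usual transformers convention and are an assumption here, not taken from this
# module):
#     config = XmodConfig(adapter_reduction_factor=2, languages=("en_XX", "de_DE"))
#     onnx_inputs = XmodOnnxConfig(config, task="default").inputs  # OrderedDict of dynamic axes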
| 184 |
'''simple docstring'''
import math
import random
from typing import Any
from .hill_climbing import SearchProblem
def simulated_annealing(search_prob, find_max: bool = True, max_x: float = math.inf, min_x: float = -math.inf, max_y: float = math.inf, min_y: float = -math.inf, visualization: bool = False, start_temperate: float = 100, rate_of_decrease: float = 0.01, threshold_temp: float = 1, ) -> Any:
    """simple docstring"""
    search_end = False
    current_state = search_prob
    current_temp = start_temperate
    scores = []
    iterations = 0
    best_state = None
    while not search_end:
        current_score = current_state.score()
        if best_state is None or current_score > best_state.score():
            best_state = current_state
        scores.append(current_score)
        iterations += 1
        next_state = None
        neighbors = current_state.get_neighbors()
        while (
            next_state is None and neighbors
        ):  # till we do not find a neighbor that we can move to
            index = random.randint(0, len(neighbors) - 1)  # picking a random neighbor
            picked_neighbor = neighbors.pop(index)
            change = picked_neighbor.score() - current_score
            if (
                picked_neighbor.x > max_x
                or picked_neighbor.x < min_x
                or picked_neighbor.y > max_y
                or picked_neighbor.y < min_y
            ):
                continue  # neighbor outside our bounds
            if not find_max:
                change = change * -1  # in case we are finding minimum
            if change > 0:  # improves the solution
                next_state = picked_neighbor
            else:
                probability = (math.e) ** (
                    change / current_temp
                )  # probability generation function
                if random.random() < probability:  # random number within probability
                    next_state = picked_neighbor
        current_temp = current_temp - (current_temp * rate_of_decrease)
        if current_temp < threshold_temp or next_state is None:
            # temperature below threshold, or could not find a suitable neighbor
            search_end = True
        else:
            current_state = next_state
    if visualization:
        from matplotlib import pyplot as plt
        plt.plot(range(iterations), scores)
        plt.xlabel("""Iterations""")
        plt.ylabel("""Function values""")
        plt.show()
    return best_state
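# Added note (illustrative): the acceptance rule above is the Metropolis
# criterion. A worsening move with score change `change` < 0 is still accepted
# with probability e ** (change / current_temp), so hot early iterations explore
# freely while cold late iterations behave like plain hill climbing.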
if __name__ == "__main__":
    def test_fa(x, y):
        """simple docstring"""
        return (x**2) + (y**2)

    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
    local_min = simulated_annealing(
        prob, find_max=False, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        '''The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 '''
        F"""and 50 > y > - 5 found via hill climbing: {local_min.score()}"""
    )
    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
    local_min = simulated_annealing(
        prob, find_max=True, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        '''The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 '''
        F"""and 50 > y > - 5 found via hill climbing: {local_min.score()}"""
    )

    def test_fa(x, y):
        """simple docstring"""
        return (3 * x**2) - (6 * y)

    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
    local_min = simulated_annealing(prob, find_max=False, visualization=True)
    print(
        '''The minimum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: '''
        F"""{local_min.score()}"""
    )
    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
    local_min = simulated_annealing(prob, find_max=True, visualization=True)
    print(
        '''The maximum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: '''
        F"""{local_min.score()}"""
    )
| 184 | 1 |
"""simple docstring"""
from typing import List, Optional, Union
import torch
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
EXAMPLE_DOC_STRING = '''
Examples:
```py
>>> import torch
>>> import numpy as np
>>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline
>>> from transformers import pipeline
>>> from diffusers.utils import load_image
>>> def make_hint(image, depth_estimator):
... image = depth_estimator(image)["depth"]
... image = np.array(image)
... image = image[:, :, None]
... image = np.concatenate([image, image, image], axis=2)
... detected_map = torch.from_numpy(image).float() / 255.0
... hint = detected_map.permute(2, 0, 1)
... return hint
>>> depth_estimator = pipeline("depth-estimation")
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
... )
>>> pipe_prior = pipe_prior.to("cuda")
>>> pipe = KandinskyV22ControlnetPipeline.from_pretrained(
... "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16
... )
>>> pipe = pipe.to("cuda")
>>> img = load_image(
... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
... "/kandinsky/cat.png"
... ).resize((768, 768))
>>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to("cuda")
>>> prompt = "A robot, 4k photo"
>>> negative_prior_prompt = "lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature"
>>> generator = torch.Generator(device="cuda").manual_seed(43)
>>> image_emb, zero_image_emb = pipe_prior(
... prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator
... ).to_tuple()
>>> images = pipe(
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... hint=hint,
... num_inference_steps=50,
... generator=generator,
... height=768,
... width=768,
... ).images
>>> images[0].save("robot_cat.png")
```
'''
def downscale_height_and_width(height, width, scale_factor=8):
    """Map an image height/width to the matching (height, width) in MoVQ latent space."""
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
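# Example (ours, not from the original file): for a 768x768 image with the
# default scale_factor of 8, 768 // 8**2 == 12 with no remainder, so the latent
# size is 12 * 8 == 96 on each side:
#   downscale_height_and_width(768, 768)  # -> (96, 96)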
class _UpperCAmelCase ( _A ):
def __init__( self : List[Any] , A : UNetaDConditionModel , A : DDPMScheduler , A : VQModel , ) -> Any:
super().__init__()
self.register_modules(
unet=A , scheduler=A , movq=A , )
lowercase_ : List[str] = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def A ( self : Optional[Any] , A : Optional[Any] , A : Any , A : List[str] , A : Optional[int] , A : Optional[int] , A : List[str] ) -> List[str]:
if latents is None:
lowercase_ : str = randn_tensor(A , generator=A , device=A , dtype=A )
else:
if latents.shape != shape:
raise ValueError(F'''Unexpected latents shape, got {latents.shape}, expected {shape}''' )
lowercase_ : List[Any] = latents.to(A )
lowercase_ : Union[str, Any] = latents * scheduler.init_noise_sigma
return latents
def A ( self : Optional[Any] , A : Union[str, Any]=0 ) -> int:
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('''Please install accelerate via `pip install accelerate`''' )
lowercase_ : str = torch.device(F'''cuda:{gpu_id}''' )
lowercase_ : List[Any] = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(A , A )
def A ( self : Union[str, Any] , A : Optional[int]=0 ) -> int:
if is_accelerate_available() and is_accelerate_version('''>=''' , '''0.17.0.dev0''' ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError('''`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.''' )
lowercase_ : List[Any] = torch.device(F'''cuda:{gpu_id}''' )
if self.device.type != "cpu":
self.to('''cpu''' , silence_dtype_warnings=A )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
lowercase_ : Union[str, Any] = None
for cpu_offloaded_model in [self.unet, self.movq]:
lowercase_ , lowercase_ : Any = cpu_offload_with_hook(A , A , prev_module_hook=A )
# We'll offload the last model manually.
lowercase_ : Dict = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def A ( self : str ) -> str:
if not hasattr(self.unet , '''_hf_hook''' ):
return self.device
for module in self.unet.modules():
if (
hasattr(A , '''_hf_hook''' )
and hasattr(module._hf_hook , '''execution_device''' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(A )
def __call__( self : List[Any] , A : Union[torch.FloatTensor, List[torch.FloatTensor]] , A : Union[torch.FloatTensor, List[torch.FloatTensor]] , A : torch.FloatTensor , A : int = 5_12 , A : int = 5_12 , A : int = 1_00 , A : float = 4.0 , A : int = 1 , A : Optional[Union[torch.Generator, List[torch.Generator]]] = None , A : Optional[torch.FloatTensor] = None , A : Optional[str] = "pil" , A : bool = True , ) -> List[Any]:
lowercase_ : List[str] = self._execution_device
lowercase_ : Optional[Any] = guidance_scale > 1.0
if isinstance(A , A ):
lowercase_ : str = torch.cat(A , dim=0 )
if isinstance(A , A ):
lowercase_ : Optional[int] = torch.cat(A , dim=0 )
if isinstance(A , A ):
lowercase_ : Optional[Any] = torch.cat(A , dim=0 )
lowercase_ : List[str] = image_embeds.shape[0] * num_images_per_prompt
if do_classifier_free_guidance:
lowercase_ : Any = image_embeds.repeat_interleave(A , dim=0 )
lowercase_ : Tuple = negative_image_embeds.repeat_interleave(A , dim=0 )
lowercase_ : Optional[int] = hint.repeat_interleave(A , dim=0 )
lowercase_ : Dict = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=A )
lowercase_ : List[Any] = torch.cat([hint, hint] , dim=0 ).to(dtype=self.unet.dtype , device=A )
self.scheduler.set_timesteps(A , device=A )
lowercase_ : Optional[Any] = self.scheduler.timesteps
lowercase_ : Union[str, Any] = self.movq.config.latent_channels
lowercase_ , lowercase_ : Any = downscale_height_and_width(A , A , self.movq_scale_factor )
# create initial latent
lowercase_ : Dict = self.prepare_latents(
(batch_size, num_channels_latents, height, width) , image_embeds.dtype , A , A , A , self.scheduler , )
for i, t in enumerate(self.progress_bar(A ) ):
# expand the latents if we are doing classifier free guidance
lowercase_ : Union[str, Any] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
lowercase_ : Dict = {'''image_embeds''': image_embeds, '''hint''': hint}
lowercase_ : Dict = self.unet(
sample=A , timestep=A , encoder_hidden_states=A , added_cond_kwargs=A , return_dict=A , )[0]
if do_classifier_free_guidance:
lowercase_ , lowercase_ : Tuple = noise_pred.split(latents.shape[1] , dim=1 )
lowercase_ , lowercase_ : Optional[int] = noise_pred.chunk(2 )
lowercase_ , lowercase_ : List[str] = variance_pred.chunk(2 )
lowercase_ : Any = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
lowercase_ : Optional[int] = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , '''variance_type''' )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
lowercase_ , lowercase_ : Optional[Any] = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
lowercase_ : Union[str, Any] = self.scheduler.step(
A , A , A , generator=A , )[0]
# post-processing
lowercase_ : List[Any] = self.movq.decode(A , force_not_quantize=A )['''sample''']
if output_type not in ["pt", "np", "pil"]:
raise ValueError(F'''Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}''' )
if output_type in ["np", "pil"]:
lowercase_ : List[Any] = image * 0.5 + 0.5
lowercase_ : Any = image.clamp(0 , 1 )
lowercase_ : Optional[Any] = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
lowercase_ : int = self.numpy_to_pil(A )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=A )
| 33 |
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
UpperCAmelCase__ : Optional[int] = logging.get_logger(__name__) # pylint: disable=invalid-name
UpperCAmelCase__ : Dict = '\n Examples:\n ```py\n >>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline\n >>> from diffusers.utils import load_image\n >>> import torch\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16\n ... )\n >>> pipe_prior.to("cuda")\n\n >>> prompt = "A red cartoon frog, 4k"\n >>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)\n\n >>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16\n ... )\n >>> pipe.to("cuda")\n\n >>> init_image = load_image(\n ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"\n ... "/kandinsky/frog.png"\n ... )\n\n >>> image = pipe(\n ... image=init_image,\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=100,\n ... strength=0.2,\n ... ).images\n\n >>> image[0].save("red_frog.png")\n ```\n'
def downscale_height_and_width(height, width, scale_factor=8):
    """Map an image height/width to the matching (height, width) in MoVQ latent space."""
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor


def prepare_image(pil_image, w=512, h=512):
    """Resize a PIL image and convert it to a [-1, 1] float tensor of shape (1, 3, h, w)."""
    pil_image = pil_image.resize((w, h), resample=Image.BICUBIC, reducing_gap=1)
    arr = np.array(pil_image.convert("RGB"))
    arr = arr.astype(np.float32) / 127.5 - 1
    arr = np.transpose(arr, [2, 0, 1])
    image = torch.from_numpy(arr).unsqueeze(0)
    return image
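# Usage sketch (ours): turn a PIL image into the tensor format the pipeline
# expects. Any 8-bit RGB image works; values land in [-1.0, 1.0].
#   from PIL import Image
#   tensor = prepare_image(Image.open("frog.png"), w=768, h=768)  # shape (1, 3, 768, 768)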
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
def __init__( self : int , lowerCAmelCase_ : UNetaDConditionModel , lowerCAmelCase_ : DDPMScheduler , lowerCAmelCase_ : VQModel , ):
"""simple docstring"""
super().__init__()
self.register_modules(
unet=lowerCAmelCase_ , scheduler=lowerCAmelCase_ , movq=lowerCAmelCase_ , )
_A: List[Any] = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def __magic_name__ ( self : List[str] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Union[str, Any] ):
"""simple docstring"""
# get the original timestep using init_timestep
_A: Union[str, Any] = min(int(num_inference_steps * strength ) , lowerCAmelCase_ )
_A: str = max(num_inference_steps - init_timestep , 0 )
_A: str = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def __magic_name__ ( self : List[str] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : str , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Optional[int]=None ):
"""simple docstring"""
if not isinstance(lowerCAmelCase_ , (torch.Tensor, PIL.Image.Image, list) ):
raise ValueError(
F"""`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(lowerCAmelCase_ )}""" )
_A: Optional[int] = image.to(device=lowerCAmelCase_ , dtype=lowerCAmelCase_ )
_A: Union[str, Any] = batch_size * num_images_per_prompt
if image.shape[1] == 4:
_A: Optional[int] = image
else:
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and len(lowerCAmelCase_ ) != batch_size:
raise ValueError(
F"""You have passed a list of generators of length {len(lowerCAmelCase_ )}, but requested an effective batch"""
F""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" )
elif isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
_A: List[Any] = [
self.movq.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(lowerCAmelCase_ )
]
_A: Optional[Any] = torch.cat(lowerCAmelCase_ , dim=0 )
else:
_A: Optional[int] = self.movq.encode(lowerCAmelCase_ ).latent_dist.sample(lowerCAmelCase_ )
_A: int = self.movq.config.scaling_factor * init_latents
_A: Optional[Any] = torch.cat([init_latents] , dim=0 )
_A: Any = init_latents.shape
_A: Optional[Any] = randn_tensor(lowerCAmelCase_ , generator=lowerCAmelCase_ , device=lowerCAmelCase_ , dtype=lowerCAmelCase_ )
# get latents
_A: Union[str, Any] = self.scheduler.add_noise(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
_A: List[str] = init_latents
return latents
def __magic_name__ ( self : Optional[int] , lowerCAmelCase_ : Optional[int]=0 ):
"""simple docstring"""
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('''Please install accelerate via `pip install accelerate`''' )
_A: Any = torch.device(F"""cuda:{gpu_id}""" )
_A: int = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(lowerCAmelCase_ , lowerCAmelCase_ )
def __magic_name__ ( self : Any , lowerCAmelCase_ : Any=0 ):
"""simple docstring"""
if is_accelerate_available() and is_accelerate_version('''>=''' , '''0.17.0.dev0''' ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError('''`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.''' )
_A: Any = torch.device(F"""cuda:{gpu_id}""" )
if self.device.type != "cpu":
self.to('''cpu''' , silence_dtype_warnings=lowerCAmelCase_ )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
_A: int = None
for cpu_offloaded_model in [self.unet, self.movq]:
_A , _A: List[Any] = cpu_offload_with_hook(lowerCAmelCase_ , lowerCAmelCase_ , prev_module_hook=lowerCAmelCase_ )
# We'll offload the last model manually.
_A: Tuple = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def __magic_name__ ( self : List[Any] ):
"""simple docstring"""
if not hasattr(self.unet , '''_hf_hook''' ):
return self.device
for module in self.unet.modules():
if (
hasattr(lowerCAmelCase_ , '''_hf_hook''' )
and hasattr(module._hf_hook , '''execution_device''' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(lowerCAmelCase_ )
def __call__( self : Optional[Any] , lowerCAmelCase_ : Union[torch.FloatTensor, List[torch.FloatTensor]] , lowerCAmelCase_ : Union[torch.FloatTensor, PIL.Image.Image, List[torch.FloatTensor], List[PIL.Image.Image]] , lowerCAmelCase_ : Union[torch.FloatTensor, List[torch.FloatTensor]] , lowerCAmelCase_ : int = 5_1_2 , lowerCAmelCase_ : int = 5_1_2 , lowerCAmelCase_ : int = 1_0_0 , lowerCAmelCase_ : float = 4.0 , lowerCAmelCase_ : float = 0.3 , lowerCAmelCase_ : int = 1 , lowerCAmelCase_ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , lowerCAmelCase_ : Optional[str] = "pil" , lowerCAmelCase_ : bool = True , ):
"""simple docstring"""
_A: Any = self._execution_device
_A: Any = guidance_scale > 1.0
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
_A: Any = torch.cat(lowerCAmelCase_ , dim=0 )
_A: int = image_embeds.shape[0]
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
_A: Dict = torch.cat(lowerCAmelCase_ , dim=0 )
if do_classifier_free_guidance:
_A: Any = image_embeds.repeat_interleave(lowerCAmelCase_ , dim=0 )
_A: str = negative_image_embeds.repeat_interleave(lowerCAmelCase_ , dim=0 )
_A: Dict = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=lowerCAmelCase_ )
if not isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
_A: List[str] = [image]
if not all(isinstance(lowerCAmelCase_ , (PIL.Image.Image, torch.Tensor) ) for i in image ):
raise ValueError(
F"""Input is in incorrect format: {[type(lowerCAmelCase_ ) for i in image]}. Currently, we only support PIL image and pytorch tensor""" )
_A: List[str] = torch.cat([prepare_image(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) for i in image] , dim=0 )
_A: Tuple = image.to(dtype=image_embeds.dtype , device=lowerCAmelCase_ )
_A: Optional[Any] = self.movq.encode(lowerCAmelCase_ )['''latents''']
_A: Optional[int] = latents.repeat_interleave(lowerCAmelCase_ , dim=0 )
self.scheduler.set_timesteps(lowerCAmelCase_ , device=lowerCAmelCase_ )
_A , _A: List[Any] = self.get_timesteps(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
_A: Dict = timesteps[:1].repeat(batch_size * num_images_per_prompt )
_A , _A: Optional[int] = downscale_height_and_width(lowerCAmelCase_ , lowerCAmelCase_ , self.movq_scale_factor )
_A: Any = self.prepare_latents(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , image_embeds.dtype , lowerCAmelCase_ , lowerCAmelCase_ )
for i, t in enumerate(self.progress_bar(lowerCAmelCase_ ) ):
# expand the latents if we are doing classifier free guidance
_A: Dict = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
_A: str = {'''image_embeds''': image_embeds}
_A: Optional[int] = self.unet(
sample=lowerCAmelCase_ , timestep=lowerCAmelCase_ , encoder_hidden_states=lowerCAmelCase_ , added_cond_kwargs=lowerCAmelCase_ , return_dict=lowerCAmelCase_ , )[0]
if do_classifier_free_guidance:
_A , _A: str = noise_pred.split(latents.shape[1] , dim=1 )
_A , _A: int = noise_pred.chunk(2 )
_A , _A: int = variance_pred.chunk(2 )
_A: Dict = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
_A: List[str] = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , '''variance_type''' )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
_A , _A: Optional[Any] = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
_A: Any = self.scheduler.step(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , generator=lowerCAmelCase_ , )[0]
# post-processing
_A: Tuple = self.movq.decode(lowerCAmelCase_ , force_not_quantize=lowerCAmelCase_ )['''sample''']
if output_type not in ["pt", "np", "pil"]:
raise ValueError(F"""Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}""" )
if output_type in ["np", "pil"]:
_A: int = image * 0.5 + 0.5
_A: Any = image.clamp(0 , 1 )
_A: Any = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
_A: Union[str, Any] = self.numpy_to_pil(lowerCAmelCase_ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=lowerCAmelCase_ )
| 121 | 0 |
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFInpaintingSuperResolutionPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFInpaintingSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({"original_image"})
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_superresolution_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 16, 16), rng=random.Random(seed)).to(device)
        original_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "original_image": original_image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }

        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)
| 371 |
from collections import OrderedDict
from typing import List, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/efficientnet-b7": "https://huggingface.co/google/efficientnet-b7/resolve/main/config.json",
}


class EfficientNetConfig(PretrainedConfig):
    model_type = "efficientnet"

    def __init__(
        self,
        num_channels: int = 3,
        image_size: int = 600,
        width_coefficient: float = 2.0,
        depth_coefficient: float = 3.1,
        depth_divisor: int = 8,
        kernel_sizes: List[int] = [3, 3, 5, 3, 5, 5, 3],
        in_channels: List[int] = [32, 16, 24, 40, 80, 112, 192],
        out_channels: List[int] = [16, 24, 40, 80, 112, 192, 320],
        depthwise_padding: List[int] = [],
        strides: List[int] = [1, 2, 2, 2, 1, 2, 1],
        num_block_repeats: List[int] = [1, 2, 2, 3, 3, 4, 1],
        expand_ratios: List[int] = [1, 6, 6, 6, 6, 6, 6],
        squeeze_expansion_ratio: float = 0.25,
        hidden_act: str = "swish",
        hidden_dim: int = 2560,
        pooling_type: str = "mean",
        initializer_range: float = 0.02,
        batch_norm_eps: float = 0.001,
        batch_norm_momentum: float = 0.99,
        dropout_rate: float = 0.5,
        drop_connect_rate: float = 0.2,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.dropout_rate = dropout_rate
        self.drop_connect_rate = drop_connect_rate
        self.num_hidden_layers = sum(num_block_repeats) * 4


class EfficientNetOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5
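# Usage sketch (ours, not from the original module): the defaults above describe
# EfficientNet-B7, so a bare instantiation gives a quick consistency check.
#   config = EfficientNetConfig()
#   assert config.num_hidden_layers == sum([1, 2, 2, 3, 3, 4, 1]) * 4  # == 64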
| 281 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_biogpt": ["BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BioGptConfig"],
    "tokenization_biogpt": ["BioGptTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_biogpt"] = [
        "BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BioGptForCausalLM",
        "BioGptForTokenClassification",
        "BioGptForSequenceClassification",
        "BioGptModel",
        "BioGptPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
    from .tokenization_biogpt import BioGptTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_biogpt import (
            BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
            BioGptForCausalLM,
            BioGptForSequenceClassification,
            BioGptForTokenClassification,
            BioGptModel,
            BioGptPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 43 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImgaImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class __lowerCAmelCase (lowercase_ , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ : Dict = ShapEImgaImgPipeline
lowerCAmelCase__ : List[str] = ["""image"""]
lowerCAmelCase__ : Any = ["""image"""]
lowerCAmelCase__ : Any = [
"""num_images_per_prompt""",
"""num_inference_steps""",
"""generator""",
"""latents""",
"""guidance_scale""",
"""frame_size""",
"""output_type""",
"""return_dict""",
]
lowerCAmelCase__ : Tuple = False
@property
def UpperCamelCase__ (self : List[Any] ):
'''simple docstring'''
return 32
@property
def UpperCamelCase__ (self : str ):
'''simple docstring'''
return 32
@property
def UpperCamelCase__ (self : Union[str, Any] ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def UpperCamelCase__ (self : int ):
'''simple docstring'''
return 8
@property
def UpperCamelCase__ (self : Any ):
'''simple docstring'''
torch.manual_seed(0 )
lowercase__ = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , image_size=64 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=1 , )
lowercase__ = CLIPVisionModel(UpperCamelCase )
return model
@property
def UpperCamelCase__ (self : List[Any] ):
'''simple docstring'''
lowercase__ = CLIPImageProcessor(
crop_size=224 , do_center_crop=UpperCamelCase , do_normalize=UpperCamelCase , do_resize=UpperCamelCase , image_mean=[0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73] , image_std=[0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11] , resample=3 , size=224 , )
return image_processor
@property
def UpperCamelCase__ (self : str ):
'''simple docstring'''
torch.manual_seed(0 )
lowercase__ = {
'''num_attention_heads''': 2,
'''attention_head_dim''': 16,
'''embedding_dim''': self.time_input_dim,
'''num_embeddings''': 32,
'''embedding_proj_dim''': self.text_embedder_hidden_size,
'''time_embed_dim''': self.time_embed_dim,
'''num_layers''': 1,
'''clip_embed_dim''': self.time_input_dim * 2,
'''additional_embeddings''': 0,
'''time_embed_act_fn''': '''gelu''',
'''norm_in_type''': '''layer''',
'''embedding_proj_norm_type''': '''layer''',
'''encoder_hid_proj_type''': None,
'''added_emb_type''': None,
}
lowercase__ = PriorTransformer(**UpperCamelCase )
return model
@property
def UpperCamelCase__ (self : int ):
'''simple docstring'''
torch.manual_seed(0 )
lowercase__ = {
'''param_shapes''': (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
'''d_latent''': self.time_input_dim,
'''d_hidden''': self.renderer_dim,
'''n_output''': 12,
'''background''': (
0.1,
0.1,
0.1,
),
}
lowercase__ = ShapERenderer(**UpperCamelCase )
return model
def UpperCamelCase__ (self : Union[str, Any] ):
'''simple docstring'''
lowercase__ = self.dummy_prior
lowercase__ = self.dummy_image_encoder
lowercase__ = self.dummy_image_processor
lowercase__ = self.dummy_renderer
lowercase__ = HeunDiscreteScheduler(
beta_schedule='''exp''' , num_train_timesteps=1024 , prediction_type='''sample''' , use_karras_sigmas=UpperCamelCase , clip_sample=UpperCamelCase , clip_sample_range=1.0 , )
lowercase__ = {
'''prior''': prior,
'''image_encoder''': image_encoder,
'''image_processor''': image_processor,
'''renderer''': renderer,
'''scheduler''': scheduler,
}
return components
def UpperCamelCase__ (self : Optional[int] , UpperCamelCase : Optional[int] , UpperCamelCase : str=0 ):
'''simple docstring'''
lowercase__ = floats_tensor((1, 3, 64, 64) , rng=random.Random(UpperCamelCase ) ).to(UpperCamelCase )
if str(UpperCamelCase ).startswith('''mps''' ):
lowercase__ = torch.manual_seed(UpperCamelCase )
else:
lowercase__ = torch.Generator(device=UpperCamelCase ).manual_seed(UpperCamelCase )
lowercase__ = {
'''image''': input_image,
'''generator''': generator,
'''num_inference_steps''': 1,
'''frame_size''': 32,
'''output_type''': '''np''',
}
return inputs
def UpperCamelCase__ (self : Tuple ):
'''simple docstring'''
lowercase__ = '''cpu'''
lowercase__ = self.get_dummy_components()
lowercase__ = self.pipeline_class(**UpperCamelCase )
lowercase__ = pipe.to(UpperCamelCase )
pipe.set_progress_bar_config(disable=UpperCamelCase )
lowercase__ = pipe(**self.get_dummy_inputs(UpperCamelCase ) )
lowercase__ = output.images[0]
lowercase__ = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
lowercase__ = np.array(
[
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCamelCase__ (self : Optional[Any] ):
'''simple docstring'''
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def UpperCamelCase__ (self : int ):
'''simple docstring'''
lowercase__ = torch_device == '''cpu'''
lowercase__ = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=UpperCamelCase , relax_max_difference=UpperCamelCase , )
def UpperCamelCase__ (self : int ):
'''simple docstring'''
lowercase__ = self.get_dummy_components()
lowercase__ = self.pipeline_class(**UpperCamelCase )
lowercase__ = pipe.to(UpperCamelCase )
pipe.set_progress_bar_config(disable=UpperCamelCase )
lowercase__ = 1
lowercase__ = 2
lowercase__ = self.get_dummy_inputs(UpperCamelCase )
for key in inputs.keys():
if key in self.batch_params:
lowercase__ = batch_size * [inputs[key]]
lowercase__ = pipe(**UpperCamelCase , num_images_per_prompt=UpperCamelCase )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class __lowerCAmelCase (unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase__ (self : Optional[Any] ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase__ (self : Union[str, Any] ):
'''simple docstring'''
lowercase__ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/shap_e/corgi.png''' )
lowercase__ = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/shap_e/test_shap_e_img2img_out.npy''' )
lowercase__ = ShapEImgaImgPipeline.from_pretrained('''openai/shap-e-img2img''' )
lowercase__ = pipe.to(UpperCamelCase )
pipe.set_progress_bar_config(disable=UpperCamelCase )
lowercase__ = torch.Generator(device=UpperCamelCase ).manual_seed(0 )
lowercase__ = pipe(
UpperCamelCase , generator=UpperCamelCase , guidance_scale=3.0 , num_inference_steps=64 , frame_size=64 , output_type='''np''' , ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(UpperCamelCase , UpperCamelCase )
| 2 | 0 |
"""Generate a large (pseudo)prime using the Rabin-Miller primality test."""
import random


def rabin_miller(num: int) -> bool:
    """Probabilistic Rabin-Miller primality test with 5 random rounds."""
    s = num - 1
    t = 0
    while s % 2 == 0:
        s = s // 2
        t += 1
    for _ in range(5):
        a = random.randrange(2, num - 1)
        v = pow(a, s, num)
        if v != 1:
            i = 0
            while v != (num - 1):
                if i == t - 1:
                    return False
                else:
                    i = i + 1
                    v = (v**2) % num
    return True


def is_prime_low_num(num: int) -> bool:
    """Trial division against small primes first, then fall back to Rabin-Miller."""
    if num < 2:
        return False

    low_primes = [
        2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61,
        67, 71, 73, 79, 83, 89, 97, 101, 103, 107, 109, 113, 127, 131, 137,
        139, 149, 151, 157, 163, 167, 173, 179, 181, 191, 193, 197, 199, 211,
        223, 227, 229, 233, 239, 241, 251, 257, 263, 269, 271, 277, 281, 283,
        293, 307, 311, 313, 317, 331, 337, 347, 349, 353, 359, 367, 373, 379,
        383, 389, 397, 401, 409, 419, 421, 431, 433, 439, 443, 449, 457, 461,
        463, 467, 479, 487, 491, 499, 503, 509, 521, 523, 541, 547, 557, 563,
        569, 571, 577, 587, 593, 599, 601, 607, 613, 617, 619, 631, 641, 643,
        647, 653, 659, 661, 673, 677, 683, 691, 701, 709, 719, 727, 733, 739,
        743, 751, 757, 761, 769, 773, 787, 797, 809, 811, 821, 823, 827, 829,
        839, 853, 857, 859, 863, 877, 881, 883, 887, 907, 911, 919, 929, 937,
        941, 947, 953, 967, 971, 977, 983, 991, 997,
    ]

    if num in low_primes:
        return True

    for prime in low_primes:
        if (num % prime) == 0:
            return False

    return rabin_miller(num)


def generate_large_prime(keysize: int = 1024) -> int:
    """Sample random keysize-bit candidates until one passes the primality checks."""
    while True:
        num = random.randrange(2 ** (keysize - 1), 2 ** (keysize))
        if is_prime_low_num(num):
            return num


if __name__ == "__main__":
    num = generate_large_prime()
    print(("Prime number:", num))
    print(("is_prime_low_num:", is_prime_low_num(num)))
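# Usage sketch (ours): smaller key sizes make quick experiments cheap, e.g.
#   p = generate_large_prime(keysize=128)  # a random 128-bit probable prime
#   assert is_prime_low_num(p)
# The test is probabilistic: each Rabin-Miller round lets a composite slip
# through with probability at most 1/4, so 5 rounds bound the error by (1/4)**5.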
| 360 |
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging
lowerCAmelCase : Union[str, Any] =logging.get_logger(__name__)
lowerCAmelCase : Optional[Any] ={
'''EleutherAI/gpt-neo-1.3B''': '''https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json''',
# See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}
class a_ ( _lowerCAmelCase ):
__A = "gpt_neo"
__A = ["past_key_values"]
__A = {"num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}
def __init__( self : Union[str, Any] , lowercase : Tuple=50_257 , lowercase : Optional[Any]=2_048 , lowercase : Union[str, Any]=2_048 , lowercase : int=24 , lowercase : Optional[Any]=[[["global", "local"], 12]] , lowercase : List[Any]=16 , lowercase : List[str]=None , lowercase : Union[str, Any]=256 , lowercase : Optional[Any]="gelu_new" , lowercase : Any=0.0 , lowercase : List[Any]=0.0 , lowercase : Any=0.0 , lowercase : str=0.1 , lowercase : Dict=1e-5 , lowercase : List[str]=0.02 , lowercase : Union[str, Any]=True , lowercase : int=50_256 , lowercase : Union[str, Any]=50_256 , **lowercase : Dict , ):
"""simple docstring"""
lowercase_ :str = vocab_size
lowercase_ :Tuple = max_position_embeddings
lowercase_ :Tuple = hidden_size
lowercase_ :List[str] = num_layers
lowercase_ :int = num_heads
lowercase_ :Union[str, Any] = intermediate_size
lowercase_ :Tuple = window_size
lowercase_ :Any = activation_function
lowercase_ :Tuple = resid_dropout
lowercase_ :Any = embed_dropout
lowercase_ :str = attention_dropout
lowercase_ :List[str] = classifier_dropout
lowercase_ :List[Any] = layer_norm_epsilon
lowercase_ :List[str] = initializer_range
lowercase_ :int = use_cache
lowercase_ :Tuple = bos_token_id
lowercase_ :Optional[Any] = eos_token_id
lowercase_ :int = attention_types
lowercase_ :Tuple = self.expand_attention_types_params(lowercase )
if len(self.attention_layers ) != self.num_layers:
raise ValueError(
"Configuration for convolutional module is incorrect. "
"It is required that `len(config.attention_layers)` == `config.num_layers` "
F'but is `len(config.attention_layers) = {len(self.attention_layers )}`, '
F'`config.num_layers = {self.num_layers}`. '
"`config.attention_layers` is prepared using `config.attention_types`. "
"Please verify the value of `config.attention_types` argument." )
super().__init__(bos_token_id=lowercase , eos_token_id=lowercase , **lowercase )
@staticmethod
def lowercase__ ( lowercase : str ):
"""simple docstring"""
lowercase_ :Union[str, Any] = []
for item in attention_types:
for _ in range(item[1] ):
attentions.extend(item[0] )
return attentions
def custom_unfold(input, dimension, size, step):
    """Custom torch.Tensor.unfold implementation to enable the export to ONNX."""
    import torch

    shape = input.size()
    rank = len(shape)
    sizedim = shape[dimension]

    low_indices = torch.arange(0, sizedim, step)
    min_length = torch.div(sizedim - size, step, rounding_mode="floor") + 1
    indices = torch.arange(size) + low_indices[:min_length][:, None]

    s = [slice(None)] * rank
    s[dimension] = indices
    sliced = input[s]

    perm = list(range(0, rank + 1))
    perm.append(perm.pop(dimension + 1))

    return sliced.permute(perm)


def custom_get_block_length_and_num_blocks(seq_length, window_size):
    """ONNX-exportable replacement for GPTNeoAttentionMixin._get_block_length_and_num_blocks."""
    import torch

    candidates = torch.arange(1, window_size)
    remainders = torch.remainder(seq_length, candidates)
    divisor_indices = remainders == 0
    divisors = candidates[divisor_indices]
    largest_divisor = torch.max(divisors)
    return largest_divisor, torch.div(seq_length, largest_divisor, rounding_mode="floor")
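# Equivalence sketch (ours): custom_unfold should reproduce torch.Tensor.unfold
# for the shapes used here, e.g.
#   x = torch.arange(10).view(1, 10)
#   assert torch.equal(custom_unfold(x, dimension=1, size=2, step=2), x.unfold(1, 2, 2))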
class a_ ( _lowerCAmelCase ):
@property
def lowercase__ ( self : str ):
"""simple docstring"""
lowercase_ :int = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}} )
if self.use_past:
self.fill_with_past_key_values_(lowercase , direction="inputs" )
lowercase_ :Union[str, Any] = {0: "batch", 1: "past_sequence + sequence"}
else:
lowercase_ :str = {0: "batch", 1: "sequence"}
return common_inputs
@property
def lowercase__ ( self : Tuple ):
"""simple docstring"""
return self._config.num_heads
def lowercase__ ( self : List[str] , lowercase : PreTrainedTokenizer , lowercase : int = -1 , lowercase : int = -1 , lowercase : bool = False , lowercase : Optional[TensorType] = None , ):
"""simple docstring"""
lowercase_ :List[str] = super(lowercase , self ).generate_dummy_inputs(
lowercase , batch_size=lowercase , seq_length=lowercase , is_pair=lowercase , framework=lowercase )
# We need to order the input in the way they appears in the forward()
lowercase_ :Tuple = OrderedDict({"input_ids": common_inputs["input_ids"]} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
lowercase_ , lowercase_ :Tuple = common_inputs["input_ids"].shape
# Not using the same length for past_key_values
lowercase_ :Any = seqlen + 2
lowercase_ :List[Any] = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
lowercase_ :Dict = [
(torch.zeros(lowercase ), torch.zeros(lowercase )) for _ in range(self.num_layers )
]
lowercase_ :Tuple = common_inputs["attention_mask"]
if self.use_past:
lowercase_ :Optional[int] = ordered_inputs["attention_mask"].dtype
lowercase_ :List[Any] = torch.cat(
[ordered_inputs["attention_mask"], torch.ones(lowercase , lowercase , dtype=lowercase )] , dim=1 )
return ordered_inputs
@property
def lowercase__ ( self : int ):
"""simple docstring"""
return 13
| 147 | 0 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers import (
TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
BertConfig,
DPRConfig,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
class _UpperCAmelCase :
"""simple docstring"""
def __init__( self : List[str] , lowerCAmelCase_ : Any , lowerCAmelCase_ : Any=1_3 , lowerCAmelCase_ : Optional[Any]=7 , lowerCAmelCase_ : str=True , lowerCAmelCase_ : Dict=True , lowerCAmelCase_ : Dict=True , lowerCAmelCase_ : Tuple=True , lowerCAmelCase_ : List[str]=9_9 , lowerCAmelCase_ : Any=3_2 , lowerCAmelCase_ : Dict=2 , lowerCAmelCase_ : Any=4 , lowerCAmelCase_ : Optional[Any]=3_7 , lowerCAmelCase_ : Any="gelu" , lowerCAmelCase_ : Optional[int]=0.1 , lowerCAmelCase_ : Tuple=0.1 , lowerCAmelCase_ : Union[str, Any]=5_1_2 , lowerCAmelCase_ : Dict=1_6 , lowerCAmelCase_ : Any=2 , lowerCAmelCase_ : Tuple=0.02 , lowerCAmelCase_ : List[str]=3 , lowerCAmelCase_ : Optional[Any]=4 , lowerCAmelCase_ : List[str]=None , lowerCAmelCase_ : List[str]=0 , ) -> str:
__lowerCAmelCase = parent
__lowerCAmelCase = batch_size
__lowerCAmelCase = seq_length
__lowerCAmelCase = is_training
__lowerCAmelCase = use_input_mask
__lowerCAmelCase = use_token_type_ids
__lowerCAmelCase = use_labels
__lowerCAmelCase = vocab_size
__lowerCAmelCase = hidden_size
__lowerCAmelCase = num_hidden_layers
__lowerCAmelCase = num_attention_heads
__lowerCAmelCase = intermediate_size
__lowerCAmelCase = hidden_act
__lowerCAmelCase = hidden_dropout_prob
__lowerCAmelCase = attention_probs_dropout_prob
__lowerCAmelCase = max_position_embeddings
__lowerCAmelCase = type_vocab_size
__lowerCAmelCase = type_sequence_label_size
__lowerCAmelCase = initializer_range
__lowerCAmelCase = num_labels
__lowerCAmelCase = num_choices
__lowerCAmelCase = scope
__lowerCAmelCase = projection_dim
def lowercase ( self : Dict ) -> List[Any]:
__lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowerCAmelCase = None
if self.use_input_mask:
# follow test_modeling_tf_ctrl.py
__lowerCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
__lowerCAmelCase = None
if self.use_token_type_ids:
__lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__lowerCAmelCase = None
__lowerCAmelCase = None
__lowerCAmelCase = None
if self.use_labels:
__lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowerCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
__lowerCAmelCase = BertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCAmelCase_ , initializer_range=self.initializer_range , )
__lowerCAmelCase = DPRConfig(projection_dim=self.projection_dim , **config.to_dict() )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowercase ( self : str , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : str , lowerCAmelCase_ : str , lowerCAmelCase_ : Dict , lowerCAmelCase_ : str , lowerCAmelCase_ : int , lowerCAmelCase_ : Any ) -> str:
__lowerCAmelCase = TFDPRContextEncoder(config=lowerCAmelCase_ )
__lowerCAmelCase = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ )
__lowerCAmelCase = model(lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ )
__lowerCAmelCase = model(lowerCAmelCase_ )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.projection_dim or self.hidden_size) )
def lowercase ( self : List[str] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Any ) -> Tuple:
__lowerCAmelCase = TFDPRQuestionEncoder(config=lowerCAmelCase_ )
__lowerCAmelCase = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ )
__lowerCAmelCase = model(lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ )
__lowerCAmelCase = model(lowerCAmelCase_ )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.projection_dim or self.hidden_size) )
def lowercase ( self : Dict , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Any , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Tuple ) -> Union[str, Any]:
__lowerCAmelCase = TFDPRReader(config=lowerCAmelCase_ )
__lowerCAmelCase = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.relevance_logits.shape , (self.batch_size,) )
def lowercase ( self : int ) -> Optional[int]:
__lowerCAmelCase = self.prepare_config_and_inputs()
(
(
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) ,
) = config_and_inputs
__lowerCAmelCase = {'input_ids': input_ids}
return config, inputs_dict
@require_tf
class _UpperCAmelCase ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
a_ = (
(
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
if is_tf_available()
else ()
)
a_ = {"""feature-extraction""": TFDPRQuestionEncoder} if is_tf_available() else {}
a_ = False
a_ = False
a_ = False
a_ = False
a_ = False
def lowercase ( self : List[Any] ) -> Optional[int]:
__lowerCAmelCase = TFDPRModelTester(self )
__lowerCAmelCase = ConfigTester(self , config_class=lowerCAmelCase_ , hidden_size=3_7 )
def lowercase ( self : Optional[int] ) -> List[str]:
self.config_tester.run_common_tests()
def lowercase ( self : Any ) -> Tuple:
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_context_encoder(*lowerCAmelCase_ )
def lowercase ( self : Any ) -> List[Any]:
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_question_encoder(*lowerCAmelCase_ )
def lowercase ( self : str ) -> List[str]:
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_reader(*lowerCAmelCase_ )
@slow
def lowercase ( self : List[Any] ) -> Tuple:
for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCAmelCase = TFDPRContextEncoder.from_pretrained(lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCAmelCase = TFDPRContextEncoder.from_pretrained(lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
for model_name in TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCAmelCase = TFDPRQuestionEncoder.from_pretrained(lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
for model_name in TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCAmelCase = TFDPRReader.from_pretrained(lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
@require_tf
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
def lowercase ( self : Tuple ) -> Optional[Any]:
__lowerCAmelCase = TFDPRQuestionEncoder.from_pretrained('facebook/dpr-question_encoder-single-nq-base' )
__lowerCAmelCase = tf.constant(
[[1_0_1, 7_5_9_2, 1_0_1_0, 2_0_0_3, 2_0_2_6, 3_8_9_9, 1_0_1_4_0, 1_0_2_9, 1_0_2]] ) # [CLS] hello, is my dog cute? [SEP]
__lowerCAmelCase = model(lowerCAmelCase_ )[0] # embedding shape = (1, 768)
# compare the actual values for a slice.
__lowerCAmelCase = tf.constant(
[
[
0.03_23_62_53,
0.12_75_33_35,
0.16_81_85_09,
0.00_27_97_86,
0.3_89_69_33,
0.24_26_49_45,
0.2_17_89_71,
-0.02_33_52_27,
-0.08_48_19_59,
-0.14_32_41_17,
]
] )
self.assertTrue(numpy.allclose(output[:, :1_0].numpy() , expected_slice.numpy() , atol=1e-4 ) )
| 284 |
import re

from filelock import FileLock


try:
    import nltk

    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False

if NLTK_AVAILABLE:
    with FileLock(".lock"):
        nltk.download("punkt", quiet=True)


def add_newline_to_end_of_each_sentence(x: str) -> str:
    """Split `x` into sentences and rejoin them one sentence per line (Pegasus format)."""
    x = re.sub("<n>", "", x)  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(x))
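# Usage sketch (ours): Pegasus expects one sentence per line, so
#   add_newline_to_end_of_each_sentence("Hello world. How are you?")
# should return "Hello world.\nHow are you?" once nltk's punkt data is downloaded.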
| 284 | 1 |
"""simple docstring"""
import argparse
import pathlib
import fairseq
import torch
from fairseq.models.roberta import RobertaModel as FairseqRobertaModel
from fairseq.modules import TransformerSentenceEncoderLayer
from packaging import version
from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.models.roberta.modeling_roberta import RobertaAttention
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse("1.0.0a"):
raise Exception("requires fairseq >= 1.0.0a")
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = "Hello world! cécé herlolip"
def convert_xlm_roberta_xl_checkpoint_to_pytorch(
    roberta_checkpoint_path: str, pytorch_dump_folder_path: str, classification_head: bool
):
    roberta = FairseqRobertaModel.from_pretrained(roberta_checkpoint_path)
    roberta.eval()  # disable dropout
    roberta_sent_encoder = roberta.model.encoder.sentence_encoder
    config = XLMRobertaConfig(
        vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings,
        hidden_size=roberta.cfg.model.encoder_embed_dim,
        num_hidden_layers=roberta.cfg.model.encoder_layers,
        num_attention_heads=roberta.cfg.model.encoder_attention_heads,
        intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim,
        max_position_embeddings=514,
        type_vocab_size=1,
        layer_norm_eps=1e-5,
    )
    if classification_head:
        config.num_labels = roberta.model.classification_heads["mnli"].out_proj.weight.shape[0]

    print("Our RoBERTa config:", config)

    model = XLMRobertaXLForSequenceClassification(config) if classification_head else XLMRobertaXLForMaskedLM(config)
    model.eval()

    # Now let's copy all the weights.
    # Embeddings
    model.roberta.embeddings.word_embeddings.weight = roberta_sent_encoder.embed_tokens.weight
    model.roberta.embeddings.position_embeddings.weight = roberta_sent_encoder.embed_positions.weight
    model.roberta.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        model.roberta.embeddings.token_type_embeddings.weight
    )  # just zero them out b/c RoBERTa doesn't use them.

    model.roberta.encoder.LayerNorm.weight = roberta_sent_encoder.layer_norm.weight
    model.roberta.encoder.LayerNorm.bias = roberta_sent_encoder.layer_norm.bias

    for i in range(config.num_hidden_layers):
        # Encoder: start of layer
        layer: BertLayer = model.roberta.encoder.layer[i]
        roberta_layer: TransformerSentenceEncoderLayer = roberta_sent_encoder.layers[i]

        attention: RobertaAttention = layer.attention
        attention.self_attn_layer_norm.weight = roberta_layer.self_attn_layer_norm.weight
        attention.self_attn_layer_norm.bias = roberta_layer.self_attn_layer_norm.bias

        # self attention
        self_attn: BertSelfAttention = layer.attention.self
        assert (
            roberta_layer.self_attn.k_proj.weight.data.shape
            == roberta_layer.self_attn.q_proj.weight.data.shape
            == roberta_layer.self_attn.v_proj.weight.data.shape
            == torch.Size((config.hidden_size, config.hidden_size))
        )

        self_attn.query.weight.data = roberta_layer.self_attn.q_proj.weight
        self_attn.query.bias.data = roberta_layer.self_attn.q_proj.bias
        self_attn.key.weight.data = roberta_layer.self_attn.k_proj.weight
        self_attn.key.bias.data = roberta_layer.self_attn.k_proj.bias
        self_attn.value.weight.data = roberta_layer.self_attn.v_proj.weight
        self_attn.value.bias.data = roberta_layer.self_attn.v_proj.bias

        # self-attention output
        self_output: BertSelfOutput = layer.attention.output
        assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape
        self_output.dense.weight = roberta_layer.self_attn.out_proj.weight
        self_output.dense.bias = roberta_layer.self_attn.out_proj.bias

        # this one is final layer norm
        layer.LayerNorm.weight = roberta_layer.final_layer_norm.weight
        layer.LayerNorm.bias = roberta_layer.final_layer_norm.bias

        # intermediate
        intermediate: BertIntermediate = layer.intermediate
        assert intermediate.dense.weight.shape == roberta_layer.fc1.weight.shape
        intermediate.dense.weight = roberta_layer.fc1.weight
        intermediate.dense.bias = roberta_layer.fc1.bias

        # output
        bert_output: BertOutput = layer.output
        assert bert_output.dense.weight.shape == roberta_layer.fc2.weight.shape
        bert_output.dense.weight = roberta_layer.fc2.weight
        bert_output.dense.bias = roberta_layer.fc2.bias
        # end of layer

    if classification_head:
        model.classifier.dense.weight = roberta.model.classification_heads["mnli"].dense.weight
        model.classifier.dense.bias = roberta.model.classification_heads["mnli"].dense.bias
        model.classifier.out_proj.weight = roberta.model.classification_heads["mnli"].out_proj.weight
        model.classifier.out_proj.bias = roberta.model.classification_heads["mnli"].out_proj.bias
    else:
        # LM Head
        model.lm_head.dense.weight = roberta.model.encoder.lm_head.dense.weight
        model.lm_head.dense.bias = roberta.model.encoder.lm_head.dense.bias
        model.lm_head.layer_norm.weight = roberta.model.encoder.lm_head.layer_norm.weight
        model.lm_head.layer_norm.bias = roberta.model.encoder.lm_head.layer_norm.bias
        model.lm_head.decoder.weight = roberta.model.encoder.lm_head.weight
        model.lm_head.decoder.bias = roberta.model.encoder.lm_head.bias

    # Let's check that we get the same results.
    input_ids: torch.Tensor = roberta.encode(SAMPLE_TEXT).unsqueeze(0)  # batch of size 1

    our_output = model(input_ids)[0]
    if classification_head:
        their_output = roberta.model.classification_heads["mnli"](roberta.extract_features(input_ids))
    else:
        their_output = roberta.model(input_ids)[0]
    print(our_output.shape, their_output.shape)
    max_absolute_diff = torch.max(torch.abs(our_output - their_output)).item()
    print(f"max_absolute_diff = {max_absolute_diff}")  # ~ 1e-7
    success = torch.allclose(our_output, their_output, atol=1e-3)
    print("Do both models output the same tensors?", "🔥" if success else "💩")
    if not success:
        raise Exception("Something went wRoNg")

    pathlib.Path(pytorch_dump_folder_path).mkdir(parents=True, exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--roberta_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--classification_head", action="store_true", help="Whether to convert a final classification head."
)
    args = parser.parse_args()
convert_xlm_roberta_xl_checkpoint_to_pytorch(
args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
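# Hypothetical invocation (the file name and paths below are placeholders; the
# flags are exactly the ones defined by the argparse block above):
#   python convert_xlm_roberta_xl_checkpoint.py \
#       --roberta_checkpoint_path ./xlmr.xl \
#       --pytorch_dump_folder_path ./xlm-roberta-xl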
| 371 |
"""simple docstring"""
import multiprocessing
import os
from typing import BinaryIO, Optional, Union
import fsspec
from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
def __init__( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = False , __UpperCAmelCase = False , __UpperCAmelCase = None , __UpperCAmelCase = None , **__UpperCAmelCase , ):
'''simple docstring'''
super().__init__(
__UpperCAmelCase , split=__UpperCAmelCase , features=__UpperCAmelCase , cache_dir=__UpperCAmelCase , keep_in_memory=__UpperCAmelCase , streaming=__UpperCAmelCase , num_proc=__UpperCAmelCase , **__UpperCAmelCase , )
__UpperCamelCase = field
__UpperCamelCase = path_or_paths if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else {self.split: path_or_paths}
__UpperCamelCase = Json(
cache_dir=__UpperCAmelCase , data_files=__UpperCAmelCase , features=__UpperCAmelCase , field=__UpperCAmelCase , **__UpperCAmelCase , )
def UpperCAmelCase ( self ):
'''simple docstring'''
if self.streaming:
__UpperCamelCase = self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
__UpperCamelCase = None
__UpperCamelCase = None
__UpperCamelCase = None
__UpperCamelCase = None
self.builder.download_and_prepare(
download_config=__UpperCAmelCase , download_mode=__UpperCAmelCase , verification_mode=__UpperCAmelCase , base_path=__UpperCAmelCase , num_proc=self.num_proc , )
__UpperCamelCase = self.builder.as_dataset(
split=self.split , verification_mode=__UpperCAmelCase , in_memory=self.keep_in_memory )
return dataset
class JsonDatasetWriter:
    def __init__(
        self, dataset: Dataset, path_or_buf: Union[PathLike, BinaryIO],
        batch_size: Optional[int] = None, num_proc: Optional[int] = None, **to_json_kwargs,
    ):
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f'num_proc {num_proc} must be an integer > 0.')
        self.dataset = dataset
        self.path_or_buf = path_or_buf
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.encoding = 'utf-8'
        self.to_json_kwargs = to_json_kwargs
    def write(self) -> int:
        _ = self.to_json_kwargs.pop('path_or_buf', None)
        orient = self.to_json_kwargs.pop('orient', 'records')
        lines = self.to_json_kwargs.pop('lines', True if orient == 'records' else False)
        index = self.to_json_kwargs.pop('index', False if orient in ['split', 'table'] else True)
        compression = self.to_json_kwargs.pop('compression', None)
        if compression not in [None, "infer", "gzip", "bz2", "xz"]:
            raise NotImplementedError(f'`datasets` currently does not support {compression} compression')
        if isinstance(self.path_or_buf, (str, bytes, os.PathLike)):
            with fsspec.open(self.path_or_buf, 'wb', compression=compression) as buffer:
                written = self._write(file_obj=buffer, orient=orient, lines=lines, index=index, **self.to_json_kwargs)
        else:
            if compression:
                raise NotImplementedError(
                    f'The compression parameter is not supported when writing to a buffer, but compression={compression}'
                    ' was passed. Please provide a local path instead.')
            written = self._write(
                file_obj=self.path_or_buf, orient=orient, lines=lines, index=index, **self.to_json_kwargs)
        return written
    def _batch_json(self, args) -> bytes:
        offset, orient, lines, index, to_json_kwargs = args
        batch = query_table(
            table=self.dataset.data, key=slice(offset, offset + self.batch_size), indices=self.dataset._indices,
        )
        json_str = batch.to_pandas().to_json(
            path_or_buf=None, orient=orient, lines=lines, index=index, **to_json_kwargs)
        if not json_str.endswith('\n'):
            json_str += "\n"
        return json_str.encode(self.encoding)
    def _write(self, file_obj: BinaryIO, orient, lines, index, **to_json_kwargs) -> int:
        """Writes the dataset as JSON to a binary file handle, batch by batch."""
        written = 0
        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0, len(self.dataset), self.batch_size), unit='ba',
                disable=not logging.is_progress_bar_enabled(), desc='Creating json from Arrow format',
            ):
                json_str = self._batch_json((offset, orient, lines, index, to_json_kwargs))
                written += file_obj.write(json_str)
        else:
            num_rows, batch_size = len(self.dataset), self.batch_size
            with multiprocessing.Pool(self.num_proc) as pool:
                for json_str in logging.tqdm(
                    pool.imap(
                        self._batch_json,
                        [(offset, orient, lines, index, to_json_kwargs) for offset in range(0, num_rows, batch_size)],
                    ),
                    total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size,
                    unit='ba', disable=not logging.is_progress_bar_enabled(), desc='Creating json from Arrow format',
                ):
                    written += file_obj.write(json_str)
        return written
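# A minimal usage sketch for the reader/writer pair above (file names are
# illustrative):
#
#     from datasets import Dataset
#     ds = Dataset.from_dict({"text": ["a", "b"]})
#     JsonDatasetWriter(ds, "out.jsonl", batch_size=1000).write()  # JSON Lines by default
#     roundtrip = JsonDatasetReader("out.jsonl").read()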
| 263 | 0 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModel,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImg2ImgPipeline, UNet2DConditionModel
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
load_image,
load_numpy,
require_torch_gpu,
skip_mps,
slow,
torch_device,
)
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class StableUnCLIPImg2ImgPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableUnCLIPImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])

    def get_dummy_components(self):
        embedder_hidden_size = 32
        embedder_projection_dim = embedder_hidden_size

        # image encoding components
        feature_extractor = CLIPImageProcessor(crop_size=32, size=32)
        torch.manual_seed(0)
        image_encoder = CLIPVisionModelWithProjection(
            CLIPVisionConfig(
                hidden_size=embedder_hidden_size, projection_dim=embedder_projection_dim,
                num_hidden_layers=5, num_attention_heads=4, image_size=32,
                intermediate_size=37, patch_size=1,
            )
        )
        # image noising components
        torch.manual_seed(0)
        image_normalizer = StableUnCLIPImageNormalizer(embedding_dim=embedder_hidden_size)
        image_noising_scheduler = DDPMScheduler(beta_schedule='squaredcos_cap_v2')

        # regular denoising components
        torch.manual_seed(0)
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
        torch.manual_seed(0)
        text_encoder = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0, eos_token_id=2, hidden_size=embedder_hidden_size, projection_dim=32,
                intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5,
                pad_token_id=1, vocab_size=1000,
            )
        )
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32, in_channels=4, out_channels=4,
            down_block_types=('CrossAttnDownBlock2D', 'DownBlock2D'),
            up_block_types=('UpBlock2D', 'CrossAttnUpBlock2D'),
            block_out_channels=(32, 64), attention_head_dim=(2, 4),
            class_embed_type='projection',
            projection_class_embeddings_input_dim=embedder_projection_dim * 2,
            cross_attention_dim=embedder_hidden_size, layers_per_block=1,
            upcast_attention=True, use_linear_projection=True,
        )
        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_schedule='scaled_linear', beta_start=0.00085, beta_end=0.012,
            prediction_type='v_prediction', set_alpha_to_one=False, steps_offset=1,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL()

        components = {
# image encoding components
'feature_extractor': feature_extractor,
'image_encoder': image_encoder.eval(),
# image noising components
'image_normalizer': image_normalizer.eval(),
'image_noising_scheduler': image_noising_scheduler,
# regular denoising components
'tokenizer': tokenizer,
'text_encoder': text_encoder.eval(),
'unet': unet.eval(),
'scheduler': scheduler,
'vae': vae.eval(),
}
return components
    def get_dummy_inputs(self, device, seed=0, pil_image=True):
        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        input_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        if pil_image:
            input_image = input_image * 0.5 + 0.5
            input_image = input_image.clamp(0, 1)
            input_image = input_image.cpu().permute(0, 2, 3, 1).float().numpy()
            input_image = DiffusionPipeline.numpy_to_pil(input_image)[0]
return {
"prompt": "An anime racoon running a marathon",
"image": input_image,
"generator": generator,
"num_inference_steps": 2,
"output_type": "np",
}
    @skip_mps
    def test_image_embeds_none(self):
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableUnCLIPImg2ImgPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        inputs.update({'image_embeds': None})
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.3872, 0.7224, 0.5601, 0.4741, 0.6872, 0.5814, 0.4636, 0.3867, 0.5078])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device in ['cpu', 'mps']
        self._test_attention_slicing_forward_pass(test_max_difference=test_max_difference)

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device in ['cpu', 'mps']
        self._test_inference_batch_single_identical(test_max_difference=test_max_difference)

    @unittest.skipIf(
        torch_device != 'cuda' or not is_xformers_available(),
        reason='XFormers attention is only available with CUDA and `xformers` installed',
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_max_difference=False)
@slow
@require_torch_gpu
class StableUnCLIPImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_stable_unclip_l_img2img(self):
        input_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png')
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy')
        pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
            'fusing/stable-unclip-2-1-l-img2img', torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()
        generator = torch.Generator(device='cpu').manual_seed(0)
        output = pipe(input_image, 'anime turtle', generator=generator, output_type='np')
        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)
    def test_stable_unclip_h_img2img(self):
        input_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png')
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy')
        pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
            'fusing/stable-unclip-2-1-h-img2img', torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()
        generator = torch.Generator(device='cpu').manual_seed(0)
        output = pipe(input_image, 'anime turtle', generator=generator, output_type='np')
        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)
    def test_stable_unclip_img2img_pipeline_with_sequential_cpu_offloading(self):
        input_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png')
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
            'fusing/stable-unclip-2-1-h-img2img', torch_dtype=torch.float16)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()
        _ = pipe(
            input_image, 'anime turtle', num_inference_steps=2, output_type='np',
        )
        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
| 191 |
import argparse
import json
import os
import re
import torch
from transformers import BloomConfig, BloomModel
from transformers.file_utils import CONFIG_NAME, WEIGHTS_NAME
from transformers.utils import logging
logging.set_verbosity_info()
WEIGHTS_TO_AVERAGE_ENDSWITH = [
'''word_embeddings_layernorm.weight''',
'''word_embeddings_layernorm.bias''',
'''input_layernorm.weight''',
'''input_layernorm.bias''',
'''post_attention_layernorm.weight''',
'''post_attention_layernorm.bias''',
'''self_attention.dense.bias''',
'''mlp.dense_4h_to_h.bias''',
'''ln_f.weight''',
'''ln_f.bias''',
]
WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN = [
'''mlp.dense_4h_to_h.weight''',
'''self_attention.dense.weight''',
]
def layer_name_mapping(key: str, file: str) -> str:
    """Map Megatron-DeepSpeed checkpoint keys to transformers BLOOM parameter names."""
    # Handle first and last layers
    layer_rename_map = {
        'word_embeddings.weight': 'word_embeddings.weight',
        'word_embeddings.norm.weight': 'word_embeddings_layernorm.weight',
        'word_embeddings.norm.bias': 'word_embeddings_layernorm.bias',
        'weight': 'ln_f.weight',
        'bias': 'ln_f.bias',
    }
    if key in layer_rename_map:
        return layer_rename_map[key]
    # Handle transformer blocks: the layer index is encoded in the shard file name
    layer_number = int(re.match(r'.*layer_(\d*).*', file)[1])
    layer_number -= 3
    return f'h.{layer_number}.' + key
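# For example (the shard file name below is hypothetical):
#   layer_name_mapping("self_attention.dense.weight", "layer_04-model_00-model_states.pt")
#   -> "h.1.self_attention.dense.weight"  (Megatron layer 4 becomes transformers block 1)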
def get_dtype_size(dtype) -> float:
    """Return the size in bytes of one element of the given torch dtype."""
    if dtype == torch.bool:
        return 1 / 8
    bit_search = re.search(r'[^\d](\d+)$', str(dtype))
    if bit_search is None:
        raise ValueError(f'`dtype` is not a valid dtype: {dtype}.')
    bit_size = int(bit_search.groups()[0])
    return bit_size // 8
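# e.g. get_dtype_size(torch.float16) == 2 and get_dtype_size(torch.bool) == 0.125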
def convert_bloom_checkpoint_to_pytorch(
    bloom_checkpoint_path, bloom_config_file, pytorch_dump_folder_path, shard_model, pretraining_tp
):
    # Construct model
    if bloom_config_file == "":
        config = BloomConfig()
    else:
        config = BloomConfig.from_json_file(bloom_config_file)

    if shard_model:
        file_names = os.listdir(bloom_checkpoint_path)
        file_names = sorted(filter(lambda s: s.startswith('layer') and "model_00" in s, file_names))

        index_dict = {'weight_map': {}, 'metadata': {}}
        total_size = 0
        missing_keys = None
        config = BloomConfig()

        for j, file in enumerate(file_names):
            print('Processing file: {}'.format(file))
            tensors = None
            for i in range(pretraining_tp):
                # load all TP files
                f_name = file.replace('model_00', f'model_0{i}')
                temp = torch.load(os.path.join(bloom_checkpoint_path, f_name), map_location='cpu')

                # Rename keys in the transformers names
                keys = list(temp.keys())
                for key in keys:
                    temp[layer_name_mapping(key, file)] = temp.pop(key)

                if tensors is None:
                    tensors = temp
                else:
                    for key in tensors.keys():
                        if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                            # We average (sum and then divide) some weights across TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
                            tensors[key] += temp[key]
                        else:
                            # Some weights are RowParallelLinear in Megatron-DeepSpeed, others are ColumnParallel
                            cat_dim = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN) else 0
                            # We concatenate these weights across TP ranks
                            tensors[key] = torch.cat([tensors[key], temp[key]], dim=cat_dim)

            # Divide by the number of TP the weights we want to average
            for key in tensors.keys():
                if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                    tensors[key] = tensors[key] / pretraining_tp
            torch.save(
                tensors,
                os.path.join(
                    pytorch_dump_folder_path,
                    'pytorch_model_{}-of-{}.bin'.format(str(j + 1).zfill(5), str(len(file_names)).zfill(5)),
                ),
            )

            for key in tensors.keys():
                value = tensors[key]
                total_size += value.numel() * get_dtype_size(value.dtype)
                if key not in index_dict["weight_map"]:
                    index_dict["weight_map"][key] = 'pytorch_model_{}-of-{}.bin'.format(
                        str(j + 1).zfill(5), str(len(file_names)).zfill(5))

        config = BloomConfig()
        pytorch_config_dump_path = pytorch_dump_folder_path + '/' + CONFIG_NAME
        index_dict["metadata"]["total_size"] = total_size
        with open(pytorch_config_dump_path, 'w', encoding='utf-8') as f:
            f.write(config.to_json_string())
        with open(os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME + '.index.json'), 'w', encoding='utf-8') as f:
            json_config = json.dumps(index_dict, indent=2, sort_keys=True) + '\n'
            f.write(json_config)
    else:
        model = BloomModel(config)

        file_names = os.listdir(bloom_checkpoint_path)
        file_names = sorted(filter(lambda s: s.startswith('layer') and "model_00" in s, file_names))

        missing_keys = None
        for i, file in enumerate(file_names):
            tensors = None
            for i in range(pretraining_tp):
                # load all TP files
                f_name = file.replace('model_00', f'model_0{i}')
                temp = torch.load(os.path.join(bloom_checkpoint_path, f_name), map_location='cpu')

                # Rename keys in the transformers names
                keys = list(temp.keys())
                for key in keys:
                    temp[layer_name_mapping(key, file)] = temp.pop(key)

                if tensors is None:
                    tensors = temp
                else:
                    for key in tensors.keys():
                        # We average (sum and then divide) some weights across TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
                        if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                            tensors[key] += temp[key]
                        else:
                            # Some weights are RowParallelLinear in Megatron-DeepSpeed, others are ColumnParallel
                            cat_dim = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN) else 0
                            # We concatenate these weights across TP ranks
                            tensors[key] = torch.cat([tensors[key], temp[key]], dim=cat_dim)

            # Divide by the number of TP the weights we want to average
            for key in tensors.keys():
                if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                    tensors[key] = tensors[key] / pretraining_tp

            other_keys = model.load_state_dict(tensors, strict=False)
            assert not other_keys.unexpected_keys, f'The keys {other_keys.unexpected_keys} are unexpected'
            if missing_keys is None:
                missing_keys = set(other_keys.missing_keys)
            else:
                missing_keys = missing_keys.intersection(set(other_keys.missing_keys))
        assert not missing_keys, f'The keys {missing_keys} are missing'

        # Save pytorch-model
        os.makedirs(pytorch_dump_folder_path, exist_ok=True)
        pytorch_weights_dump_path = pytorch_dump_folder_path + '/' + WEIGHTS_NAME
        pytorch_config_dump_path = pytorch_dump_folder_path + '/' + CONFIG_NAME
        print(f'Save PyTorch model to {pytorch_weights_dump_path} with dtype {config.torch_dtype}')
        if config.torch_dtype is not None:
            model = model.to(config.torch_dtype)
        torch.save(model.state_dict(), pytorch_weights_dump_path)
        print(f'Save configuration file to {pytorch_config_dump_path}')
        with open(pytorch_config_dump_path, 'w', encoding='utf-8') as f:
            f.write(config.to_json_string())
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--bloom_checkpoint_path''',
default=None,
type=str,
required=True,
help='''Path to the Megatron-LM checkpoint path.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--bloom_config_file''',
default='''''',
type=str,
help=(
'''An optional config json file corresponding to the pre-trained model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--shard_model''',
action='''store_true''',
help='''An optional setting to shard the output model \nThis enables sharding the converted checkpoint''',
)
parser.add_argument(
'''--pretraining_tp''',
default=4,
type=int,
help='''Pretraining TP rank that has been used when training the model in Megatron-LM \n''',
)
    args = parser.parse_args()
convert_bloom_checkpoint_to_pytorch(
args.bloom_checkpoint_path,
args.bloom_config_file,
args.pytorch_dump_folder_path,
args.shard_model,
args.pretraining_tp,
)
| 71 | 0 |
'''simple docstring'''
import os
import tempfile
import unittest
import uuid
from pathlib import Path
from transformers.testing_utils import get_tests_dir, require_soundfile, require_torch, require_vision
from transformers.tools.agent_types import AgentAudio, AgentImage, AgentText
from transformers.utils import is_soundfile_availble, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_soundfile_availble():
import soundfile as sf
if is_vision_available():
from PIL import Image
def get_new_path(suffix: str = "") -> str:
    directory = tempfile.mkdtemp()
    return os.path.join(directory, str(uuid.uuid4()) + suffix)
@require_soundfile
@require_torch
class AgentAudioTests(unittest.TestCase):
    def test_from_tensor(self):
        tensor = torch.rand(12, dtype=torch.float64) - 0.5
        agent_type = AgentAudio(tensor)
        path = str(agent_type.to_string())

        # Ensure that the tensor and the agent_type's tensor are the same
        self.assertTrue(torch.allclose(tensor, agent_type.to_raw(), atol=1e-4))

        del agent_type

        # Ensure the path remains even after the object deletion
        self.assertTrue(os.path.exists(path))

        # Ensure that the file contains the same value as the original tensor
        new_tensor, _ = sf.read(path)
        self.assertTrue(torch.allclose(tensor, torch.tensor(new_tensor), atol=1e-4))

    def test_from_string(self):
        tensor = torch.rand(12, dtype=torch.float64) - 0.5
        path = get_new_path(suffix='.wav')
        sf.write(path, tensor, 16000)
        agent_type = AgentAudio(path)
        self.assertTrue(torch.allclose(tensor, agent_type.to_raw(), atol=1e-4))
        self.assertEqual(agent_type.to_string(), path)
@require_vision
@require_torch
class AgentImageTests(unittest.TestCase):
    def test_from_tensor(self):
        tensor = torch.randint(0, 256, (64, 64, 3))
        agent_type = AgentImage(tensor)
        path = str(agent_type.to_string())

        # Ensure that the tensor and the agent_type's tensor are the same
        self.assertTrue(torch.allclose(tensor, agent_type._tensor, atol=1e-4))
        self.assertIsInstance(agent_type.to_raw(), Image.Image)

        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(path))

    def test_from_string(self):
        path = Path(get_tests_dir('fixtures/tests_samples/COCO')) / "000000039769.png"
        image = Image.open(path)
        agent_type = AgentImage(path)
        self.assertTrue(path.samefile(agent_type.to_string()))
        self.assertTrue(image == agent_type.to_raw())

        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(path))

    def test_from_image(self):
        path = Path(get_tests_dir('fixtures/tests_samples/COCO')) / "000000039769.png"
        image = Image.open(path)
        agent_type = AgentImage(image)
        self.assertFalse(path.samefile(agent_type.to_string()))
        self.assertTrue(image == agent_type.to_raw())

        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(path))
class AgentTextTests(unittest.TestCase):
    def test_from_string(self):
        string = "Hey!"
        agent_type = AgentText(string)
        self.assertEqual(string, agent_type.to_string())
        self.assertEqual(string, agent_type.to_raw())
        self.assertEqual(string, agent_type)
| 364 |
'''simple docstring'''
edges = {'a': ['c', 'b'], 'b': ['d', 'e'], 'c': [], 'd': [], 'e': []}
vertices = ['a', 'b', 'c', 'd', 'e']
def topological_sort(start, visited, sort):
    """Perform a depth-first topological sort on the directed acyclic graph above."""
    current = start
    # add current to visited
    visited.append(current)
    neighbors = edges[current]
    for neighbor in neighbors:
        # if neighbor not in visited, visit
        if neighbor not in visited:
            sort = topological_sort(neighbor, visited, sort)
    # if all neighbors visited add current to sort
    sort.append(current)
    # if all vertices haven't been visited select a new one to visit
    if len(visited) != len(vertices):
        for vertice in vertices:
            if vertice not in visited:
                sort = topological_sort(vertice, visited, sort)
    # return sort
    return sort
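
# With the edges/vertices defined above, topological_sort('a', [], []) returns
# ['c', 'd', 'e', 'b', 'a']: every vertex appears after all of its descendants.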
if __name__ == "__main__":
    sort = topological_sort('a', [], [])
    print(sort)
| 190 | 0 |
def naive_cut_rod_recursive(n: int, prices: list):
    """Exhaustive search over every cut point; exponential time."""
    _enforce_args(n, prices)
    if n == 0:
        return 0
    max_revenue = float('-inf')
    for i in range(1, n + 1):
        max_revenue = max(
            max_revenue, prices[i - 1] + naive_cut_rod_recursive(n - i, prices))
    return max_revenue


def top_down_cut_rod(n: int, prices: list):
    """Memoized (top-down dynamic programming) solution."""
    _enforce_args(n, prices)
    max_rev = [float('-inf') for _ in range(n + 1)]
    return _top_down_cut_rod_recursive(n, prices, max_rev)


def _top_down_cut_rod_recursive(n: int, prices: list, max_rev: list):
    if max_rev[n] >= 0:
        return max_rev[n]
    elif n == 0:
        return 0
    else:
        max_revenue = float('-inf')
        for i in range(1, n + 1):
            max_revenue = max(
                max_revenue,
                prices[i - 1] + _top_down_cut_rod_recursive(n - i, prices, max_rev),
            )
        max_rev[n] = max_revenue
        return max_rev[n]


def bottom_up_cut_rod(n: int, prices: list):
    """Iterative (bottom-up dynamic programming) solution."""
    _enforce_args(n, prices)
    # length(max_rev) = n + 1, to accommodate for the revenue obtainable from a rod of
    # length 0.
    max_rev = [float('-inf') for _ in range(n + 1)]
    max_rev[0] = 0
    for i in range(1, n + 1):
        max_revenue_i = max_rev[i]
        for j in range(1, i + 1):
            max_revenue_i = max(max_revenue_i, prices[j - 1] + max_rev[i - j])
        max_rev[i] = max_revenue_i
    return max_rev[n]


def _enforce_args(n: int, prices: list):
    if n < 0:
        msg = f'n must be greater than or equal to 0. Got n = {n}'
        raise ValueError(msg)
    if n > len(prices):
        msg = (
            'Each integral piece of rod must have a corresponding price. '
            f'Got n = {n} but length of prices = {len(prices)}'
        )
        raise ValueError(msg)


def main():
    prices = [6, 10, 12, 15, 20, 23]
    n = len(prices)
    # the best revenue comes from cutting the rod into 6 pieces, each
    # of length 1 resulting in a revenue of 6 * 6 = 36.
    expected_max_revenue = 36
    max_rev_top_down = top_down_cut_rod(n, prices)
    max_rev_bottom_up = bottom_up_cut_rod(n, prices)
    max_rev_naive = naive_cut_rod_recursive(n, prices)
    assert expected_max_revenue == max_rev_top_down
    assert max_rev_top_down == max_rev_bottom_up
    assert max_rev_bottom_up == max_rev_naive
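
# Complexity: the naive recursion revisits subproblems and is exponential in n,
# while both dynamic-programming variants run in O(n^2) time with O(n) extra space.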
if __name__ == "__main__":
main()
| 205 |
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.t5.modeling_t5 import T5Block, T5Config, T5LayerNorm
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class SpectrogramNotesEncoder(ModelMixin, ConfigMixin, ModuleUtilsMixin):
    @register_to_config
    def __init__(
        self,
        max_length: int,
        vocab_size: int,
        d_model: int,
        dropout_rate: float,
        num_layers: int,
        num_heads: int,
        d_kv: int,
        d_ff: int,
        feed_forward_proj: str,
        is_decoder: bool = False,
    ):
        super().__init__()
        self.token_embedder = nn.Embedding(vocab_size, d_model)
        self.position_encoding = nn.Embedding(max_length, d_model)
        self.position_encoding.weight.requires_grad = False
        self.dropout_pre = nn.Dropout(p=dropout_rate)
        t5config = T5Config(
            vocab_size=vocab_size, d_model=d_model, num_heads=num_heads, d_kv=d_kv, d_ff=d_ff,
            dropout_rate=dropout_rate, feed_forward_proj=feed_forward_proj,
            is_decoder=is_decoder, is_encoder_decoder=False,
        )
        self.encoders = nn.ModuleList()
        for lyr_num in range(num_layers):
            lyr = T5Block(t5config)
            self.encoders.append(lyr)
        self.layer_norm = T5LayerNorm(d_model)
        self.dropout_post = nn.Dropout(p=dropout_rate)

    def forward(self, encoder_input_tokens, encoder_inputs_mask):
        x = self.token_embedder(encoder_input_tokens)
        seq_length = encoder_input_tokens.shape[1]
        inputs_positions = torch.arange(seq_length, device=encoder_input_tokens.device)
        x += self.position_encoding(inputs_positions)
        x = self.dropout_pre(x)

        # invert the attention mask
        input_shape = encoder_input_tokens.size()
        extended_attention_mask = self.get_extended_attention_mask(encoder_inputs_mask, input_shape)

        for lyr in self.encoders:
            x = lyr(x, extended_attention_mask)[0]
        x = self.layer_norm(x)

        return self.dropout_post(x), encoder_inputs_mask
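
# A minimal instantiation sketch; the hyperparameter values below are
# illustrative, not those of any released checkpoint:
#
#     encoder = SpectrogramNotesEncoder(
#         max_length=2048, vocab_size=1536, d_model=768, dropout_rate=0.1,
#         num_layers=12, num_heads=12, d_kv=64, d_ff=2048,
#         feed_forward_proj="gated-gelu",
#     )
#     tokens = torch.randint(0, 1536, (1, 2048))
#     mask = torch.ones(1, 2048, dtype=torch.long)
#     hidden_states, out_mask = encoder(tokens, mask)  # hidden_states: (1, 2048, 768)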
| 205 | 1 |
'''simple docstring'''
def solution(n: int = 1000) -> int:
    """Return the largest product a * b * c over Pythagorean triplets with a + b + c == n."""
    product = -1
    candidate = 0
    for a in range(1, n // 3):
        # Solving the two equations a**2+b**2=c**2 and a+b+c=N eliminating c
        b = (n * n - 2 * a * n) // (2 * n - 2 * a)
        c = n - a - b
        if c * c == (a * a + b * b):
            candidate = a * b * c
            if candidate >= product:
                product = candidate
    return product
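
# For the default n = 1000 the only such triplet is (200, 375, 425), so the
# function returns 200 * 375 * 425 == 31875000.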
if __name__ == "__main__":
print(f"""{solution() = }""")
| 101 |
'''simple docstring'''
import math
import sys
def minimum_squares_to_represent_a_number(number: int) -> int:
    """Count the minimum number of perfect squares that sum to ``number``
    (by Lagrange's four-square theorem the answer is at most 4)."""
    if number != int(number):
        raise ValueError("the value of input must be a natural number")
    if number < 0:
        raise ValueError("the value of input must not be a negative number")
    if number == 0:
        return 1
    answers = [-1] * (number + 1)
    answers[0] = 0
    for i in range(1, number + 1):
        answer = sys.maxsize
        root = int(math.sqrt(i))
        for j in range(1, root + 1):
            current_answer = 1 + answers[i - (j**2)]
            answer = min(answer, current_answer)
        answers[i] = answer
    return answers[number]
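
# e.g. minimum_squares_to_represent_a_number(13) == 2, since 13 == 4 + 9.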
if __name__ == "__main__":
import doctest
doctest.testmod()
| 101 | 1 |
'''simple docstring'''
from math import sqrt
import numpy as np
from sympy import symbols
# Coefficient
# Speed of light (m/s)
c = 299792458

# Symbols
ct, x, y, z = symbols("ct x y z")
def beta(velocity: float) -> float:
    """Dimensionless velocity v/c."""
    if velocity > c:
        raise ValueError("Speed must not exceed light speed 299,792,458 [m/s]!")
    elif velocity < 1:
        # Usually the speed should be much higher than 1 (c order of magnitude)
        raise ValueError("Speed must be greater than or equal to 1!")
    return velocity / c


def gamma(velocity: float) -> float:
    """Lorentz factor 1 / sqrt(1 - beta^2)."""
    return 1 / sqrt(1 - beta(velocity) ** 2)


def transformation_matrix(velocity: float) -> np.ndarray:
    """Boost matrix along the x-axis for the given velocity."""
    return np.array(
        [
            [gamma(velocity), -gamma(velocity) * beta(velocity), 0, 0],
            [-gamma(velocity) * beta(velocity), gamma(velocity), 0, 0],
            [0, 0, 1, 0],
            [0, 0, 0, 1],
        ])


def transform(velocity: float, event: np.ndarray = None) -> np.ndarray:
    # Ensure event is not empty
    if event is None:
        event = np.array([ct, x, y, z])  # Symbolic four vector
    else:
        event[0] *= c  # x0 is ct (speed of light * time)
    return transformation_matrix(velocity) @ event
if __name__ == "__main__":
import doctest
doctest.testmod()
# Example of symbolic vector:
    four_vector = transform(29979245)
print("Example of four vector: ")
print(F"ct' = {four_vector[0]}")
print(F"x' = {four_vector[1]}")
print(F"y' = {four_vector[2]}")
print(F"z' = {four_vector[3]}")
# Substitute symbols with numerical values
    sub_dict = {ct: c, x: 1, y: 1, z: 1}
    numerical_vector = [four_vector[i].subs(sub_dict) for i in range(4)]
print(F"\n{numerical_vector}")
| 35 |
import unittest
from transformers import AlbertTokenizer, AlbertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/spiece.model")
@require_sentencepiece
@require_tokenizers
class AlbertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = AlbertTokenizer
    rust_tokenizer_class = AlbertTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    test_sentencepiece_ignore_case = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)
    def get_input_output_texts(self, tokenizer):
        input_text = 'this is a test'
        output_text = 'this is a test'
        return input_text, output_text

    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = '<pad>'
        token_id = 0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], '<pad>')
        self.assertEqual(vocab_keys[1], '<unk>')
        self.assertEqual(vocab_keys[-1], '▁eloquent')
        self.assertEqual(len(vocab_keys), 30000)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 30000)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = 'I was born in 92000, and this is falsé.'

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    def test_full_tokenizer(self):
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize('This is a test')
        self.assertListEqual(tokens, ['▁this', '▁is', '▁a', '▁test'])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [48, 25, 21, 1289])

        tokens = tokenizer.tokenize('I was born in 92000, and this is falsé.')
        self.assertListEqual(
            tokens, ['▁i', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', 'é', '.'])
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(ids, [31, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9])
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            ['▁i', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '.'],
        )
    def test_sequence_builders(self):
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB)

        text = tokenizer.encode('sequence builders')
        text_2 = tokenizer.encode('multi-sequence build')

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
        assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [
            tokenizer.sep_token_id
        ]
@slow
    def test_tokenizer_integration(self):
        # fmt: off
A__ = {'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'input_ids': [[2, 2_1_9_7_0, 1_3, 5, 6_0_9_2, 1_6_7, 2_8, 7_1_0_3, 2_1_5_3, 6_7_3, 8, 7_0_2_8, 1_2_0_5_1, 1_8, 1_7, 7_1_0_3, 2_1_5_3, 6_7_3, 8, 3_5_1_5, 1_8_6_8_4, 8, 4_4_6_1, 6, 1_9_2_7, 2_9_7, 8, 1_2_0_6_0, 2_6_0_7, 1_8, 1_3, 5, 4_4_6_1, 1_5, 1_0_5_3_8, 3_8, 8, 1_3_5, 1_5, 8_2_2, 5_8, 1_5, 9_9_3, 1_0_3_6_3, 1_5, 1_4_6_0, 8_0_0_5, 4_4_6_1, 1_5, 9_9_3, 2_5_5, 2_3_2_8, 9, 9, 9, 6, 2_6, 1_1_1_2, 8_1_6, 3_2_6_0, 1_3, 5, 1_0_3, 2_3_7_7, 6, 1_7, 1_1_1_2, 8_1_6, 2_7_8_2, 1_3, 5, 1_0_3, 1_0_6_4_1, 6, 2_9, 8_4, 2_5_1_2, 2_4_3_0, 7_8_2, 1_8_6_8_4, 2_7_6_1, 1_9, 8_0_8, 2_4_3_0, 2_5_5_6, 1_7, 8_5_5, 1_4_8_0, 9_4_7_7, 4_0_9_1, 1_2_8, 1_1_7_1_2, 1_5, 7_1_0_3, 2_1_5_3, 6_7_3, 1_7, 2_4_8_8_3, 9_9_9_0, 9, 3], [2, 1_1_5_0_2, 2_5, 1_0_0_6, 2_0, 7_8_2, 8, 1_1_8_0_9, 8_5_5, 1_7_3_2, 1_9_3_9_3, 1_8_6_6_7, 3_7, 3_6_7, 2_1_0_1_8, 6_9, 1_8_5_4, 3_4, 1_1_8_6_0, 1_9_1_2_4, 2_7, 1_5_6, 2_2_5, 1_7, 1_9_3, 4_1_4_1, 1_9, 6_5, 9_1_2_4, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 1_4, 2_2_3_1, 8_8_6, 2_3_8_5, 1_7_6_5_9, 8_4, 1_4, 1_6_7_9_2, 1_9_5_2, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=A__, model_name='albert-base-v2', revision='6b6560eaf5ff2e250b00c50f380c5389a9c2d82e',)
| 7 | 0 |
def solution() -> int:
    """Return the product a*b*c of the Pythagorean triplet with a + b + c == 1000."""
    return [
        a * b * (1000 - a - b)
        for a in range(1, 999)
        for b in range(a, 999)
        if (a * a + b * b == (1000 - a - b) ** 2)
    ][0]
if __name__ == "__main__":
print(F'''{solution() = }''')
| 288 |
from typing import Dict
from transformers import EvalPrediction, HfArgumentParser, TrainingArguments, is_torch_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
get_torch_dist_unique_port,
require_torch_multi_gpu,
require_torch_neuroncore,
)
from transformers.training_args import ParallelMode
from transformers.utils import logging
logger = logging.get_logger(__name__)
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
from transformers import Trainer
class DummyDataset(Dataset):
    def __init__(self, length: int = 101):
        self.length = length

    def __len__(self):
        return self.length

    def __getitem__(self, i):
        return i


class DummyDataCollator:
    def __call__(self, features):
        return {"input_ids": torch.tensor(features), "labels": torch.tensor(features)}


class DummyModel(nn.Module):
    def __init__(self):
        super().__init__()
        # Add some (unused) params otherwise DDP will complain.
        self.fc = nn.Linear(120, 80)

    def forward(self, input_ids, labels=None):
        if labels is not None:
            return torch.tensor(0.0, device=input_ids.device), input_ids
        else:
            return input_ids


class TestTrainerDistributedNeuronCore(TestCasePlus):
    @require_torch_neuroncore
    def test_trainer(self):
        distributed_args = f"""--nproc_per_node=2
            --master_port={get_torch_dist_unique_port()}
            {self.test_file_dir}/test_trainer_distributed.py
        """.split()
        output_dir = self.get_auto_remove_tmp_dir()
        args = f"--output_dir {output_dir}".split()
        cmd = ["torchrun"] + distributed_args + args
        execute_subprocess_async(cmd, env=self.get_env())
        # successful return here == success - any errors would have caused an error in the sub-call


class TestTrainerDistributed(TestCasePlus):
    @require_torch_multi_gpu
    def test_trainer(self):
        distributed_args = f"""--nproc_per_node={torch.cuda.device_count()}
            --master_port={get_torch_dist_unique_port()}
            {self.test_file_dir}/test_trainer_distributed.py
        """.split()
        output_dir = self.get_auto_remove_tmp_dir()
        args = f"--output_dir {output_dir}".split()
        cmd = ["torchrun"] + distributed_args + args
        execute_subprocess_async(cmd, env=self.get_env())
        # successful return here == success - any errors would have caused an error in the sub-call
if __name__ == "__main__":
# The script below is meant to be run under torch.distributed, on a machine with multiple GPUs:
#
# PYTHONPATH="src" python -m torch.distributed.run --nproc_per_node 2 --output_dir output_dir ./tests/test_trainer_distributed.py
    parser = HfArgumentParser((TrainingArguments,))
    training_args = parser.parse_args_into_dataclasses()[0]

    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        f"distributed training: {training_args.parallel_mode != ParallelMode.NOT_DISTRIBUTED}"
    )

    # Essentially, what we want to verify in the distributed case is that we get all samples back,
    # in the right order. (this is crucial for prediction for instance)
    for dataset_length in [101, 40, 7]:
        dataset = DummyDataset(dataset_length)

        def compute_metrics(p: EvalPrediction) -> Dict:
            sequential = list(range(len(dataset)))
            success = p.predictions.tolist() == sequential and p.label_ids.tolist() == sequential
            if not success and training_args.local_rank == 0:
                logger.warning(
                    "Predictions and/or labels do not match expected results:\n  - predictions: "
                    f"{p.predictions.tolist()}\n  - labels: {p.label_ids.tolist()}\n  - expected: {sequential}"
                )
            return {"success": success}

        trainer = Trainer(
            model=DummyModel(),
            args=training_args,
            data_collator=DummyDataCollator(),
            eval_dataset=dataset,
            compute_metrics=compute_metrics,
        )
        metrics = trainer.evaluate()
        logger.info(metrics)
        if metrics["eval_success"] is not True:
            logger.error(metrics)
            exit(1)

        p = trainer.predict(dataset)
        logger.info(p.metrics)
        if p.metrics["test_success"] is not True:
            logger.error(p.metrics)
            exit(1)

        trainer.args.eval_accumulation_steps = 2

        metrics = trainer.evaluate()
        logger.info(metrics)
        if metrics["eval_success"] is not True:
            logger.error(metrics)
            exit(1)

        p = trainer.predict(dataset)
        logger.info(p.metrics)
        if p.metrics["test_success"] is not True:
            logger.error(p.metrics)
            exit(1)

        trainer.args.eval_accumulation_steps = None
| 288 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'''configuration_poolformer''': [
'''POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''PoolFormerConfig''',
'''PoolFormerOnnxConfig''',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_poolformer"] = ["PoolFormerFeatureExtractor"]
    _import_structure["image_processing_poolformer"] = ["PoolFormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_poolformer"] = [
'''POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''PoolFormerForImageClassification''',
'''PoolFormerModel''',
'''PoolFormerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_poolformer import (
POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
PoolFormerConfig,
PoolFormerOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_poolformer import PoolFormerFeatureExtractor
from .image_processing_poolformer import PoolFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_poolformer import (
POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
PoolFormerForImageClassification,
PoolFormerModel,
PoolFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 206 |
'''simple docstring'''
from collections import UserDict
from typing import Union
import numpy as np
import requests
from ..utils import (
add_end_docstrings,
logging,
)
from .audio_classification import ffmpeg_read
from .base import PIPELINE_INIT_ARGS, Pipeline
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotAudioClassificationPipeline(Pipeline):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        if self.framework != "pt":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")
        # No specific FOR_XXX available yet

    def __call__(self, audios, **kwargs):
        return super().__call__(audios, **kwargs)
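    # A minimal usage sketch (the checkpoint name is illustrative; any CLAP-style
    # audio-text model registered for this task should work):
    #
    #     from transformers import pipeline
    #     classifier = pipeline("zero-shot-audio-classification",
    #                           model="laion/clap-htsat-unfused")
    #     classifier("dog_bark.wav",
    #                candidate_labels=["dog barking", "rain", "speech"])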
    def _sanitize_parameters(self, **kwargs):
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]
        return preprocess_params, {}, {}
    def preprocess(self, audio, candidate_labels=None, hypothesis_template="This is a sound of {}."):
        if isinstance(audio, str):
            if audio.startswith("http://") or audio.startswith("https://"):
                # We need to actually check for a real protocol, otherwise it's impossible to use a local file
                # like http_huggingface_co.png
                audio = requests.get(audio).content
            else:
                with open(audio, "rb") as f:
                    audio = f.read()
        if isinstance(audio, bytes):
            audio = ffmpeg_read(audio, self.feature_extractor.sampling_rate)
        if not isinstance(audio, np.ndarray):
            raise ValueError("We expect a numpy ndarray as input")
        if len(audio.shape) != 1:
            raise ValueError("We expect a single channel audio input for ZeroShotAudioClassificationPipeline")
        inputs = self.feature_extractor(
            [audio], sampling_rate=self.feature_extractor.sampling_rate, return_tensors="pt")
        inputs["candidate_labels"] = candidate_labels
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs["text_inputs"] = [text_inputs]
        return inputs
    def _forward(self, model_inputs):
        candidate_labels = model_inputs.pop("candidate_labels")
        text_inputs = model_inputs.pop("text_inputs")
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]

        outputs = self.model(**text_inputs, **model_inputs)

        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_audio,
        }
        return model_outputs

    def postprocess(self, model_outputs):
        candidate_labels = model_outputs.pop("candidate_labels")
        logits = model_outputs["logits"][0]

        if self.framework == "pt":
            probs = logits.softmax(dim=0)
            scores = probs.tolist()
        else:
            raise ValueError("`tf` framework not supported.")

        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
| 206 | 1 |
# Lint as: python3
import dataclasses
import re
from dataclasses import dataclass
from functools import total_ordering
from typing import Optional, Union
_VERSION_REG = re.compile(r"""^(?P<major>\d+)""" r"""\.(?P<minor>\d+)""" r"""\.(?P<patch>\d+)$""")
@total_ordering
@dataclass
class Version:
    """Dataset version as a ``MAJOR.MINOR.PATCH`` string."""

    version_str: str
    description: Optional[str] = None
    major: Optional[Union[str, int]] = None
    minor: Optional[Union[str, int]] = None
    patch: Optional[Union[str, int]] = None

    def __post_init__(self):
        self.major, self.minor, self.patch = _str_to_version_tuple(self.version_str)
    def __repr__(self):
        return f"{self.tuple[0]}.{self.tuple[1]}.{self.tuple[2]}"
@property
    def tuple(self):
        return self.major, self.minor, self.patch
    def _validate_operand(self, other):
        if isinstance(other, str):
            return Version(other)
        elif isinstance(other, Version):
            return other
        raise TypeError(f"{other} (type {type(other)}) cannot be compared to version.")

    def __eq__(self, other):
        try:
            other = self._validate_operand(other)
        except (TypeError, ValueError):
            return False
        else:
            return self.tuple == other.tuple

    def __lt__(self, other):
        other = self._validate_operand(other)
        return self.tuple < other.tuple

    def __hash__(self):
        return hash(_version_tuple_to_str(self.tuple))
@classmethod
    def from_dict(cls, dic):
        field_names = {f.name for f in dataclasses.fields(cls)}
        return cls(**{k: v for k, v in dic.items() if k in field_names})

    def _to_yaml_string(self) -> str:
        return self.version_str
def _str_to_version_tuple(version_str):
    """Extract the (major, minor, patch) tuple from a version string."""
    res = _VERSION_REG.match(version_str)
    if not res:
        raise ValueError(f"Invalid version '{version_str}'. Format should be x.y.z with {{x,y,z}} being digits.")
    return tuple(int(v) for v in [res.group("major"), res.group("minor"), res.group("patch")])


def _version_tuple_to_str(version_tuple):
    """Join a version tuple back into an x.y.z string."""
    return ".".join(str(v) for v in version_tuple)
| 223 |
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
    OnnxStableDiffusionImg2ImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionImg2ImgPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    hub_checkpoint = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"

    def get_dummy_inputs(self, seed=0):
        image = floats_tensor((1, 3, 128, 128), rng=random.Random(seed))
        generator = np.random.RandomState(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "strength": 0.75,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs
    def test_pipeline_default_ddim(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.69643, 0.58484, 0.50314, 0.58760, 0.55368, 0.59643, 0.51529, 0.41217, 0.49087])
        assert np.abs(image_slice - expected_slice).max() < 1e-1

    def test_pipeline_pndm(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.61737, 0.54642, 0.53183, 0.54465, 0.52742, 0.60525, 0.49969, 0.40655, 0.48154])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_lms(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        # warmup pass to apply optimizations
        _ = pipe(**self.get_dummy_inputs())

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52761, 0.59977, 0.49033, 0.49619, 0.54282, 0.50311, 0.47600, 0.40918, 0.45203])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler_ancestral(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_dpm_multistep(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.65331, 0.58277, 0.48204, 0.56059, 0.53665, 0.56235, 0.50969, 0.40009, 0.46552])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
@property
def lowerCAmelCase__ ( self : int ):
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def lowerCAmelCase__ ( self : List[Any] ):
UpperCamelCase_: Dict = ort.SessionOptions()
UpperCamelCase_: Optional[int] = False
return options
def lowerCAmelCase__ ( self : Tuple ):
UpperCamelCase_: Optional[int] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/img2img/sketch-mountains-input.jpg""" )
UpperCamelCase_: Union[str, Any] = init_image.resize((768, 512) )
# using the PNDM scheduler by default
UpperCamelCase_: int = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
"""CompVis/stable-diffusion-v1-4""" , revision="""onnx""" , safety_checker=snake_case_ , feature_extractor=snake_case_ , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=snake_case_ )
UpperCamelCase_: Dict = """A fantasy landscape, trending on artstation"""
UpperCamelCase_: List[str] = np.random.RandomState(0 )
UpperCamelCase_: List[str] = pipe(
prompt=snake_case_ , image=snake_case_ , strength=0.75 , guidance_scale=7.5 , num_inference_steps=10 , generator=snake_case_ , output_type="""np""" , )
UpperCamelCase_: List[str] = output.images
UpperCamelCase_: Optional[Any] = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 768, 3)
UpperCamelCase_: Tuple = np.array([0.4909, 0.5059, 0.5372, 0.4623, 0.4876, 0.5049, 0.4820, 0.4956, 0.5019] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
def lowerCAmelCase__ ( self : Optional[Any] ):
UpperCamelCase_: Union[str, Any] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/img2img/sketch-mountains-input.jpg""" )
UpperCamelCase_: List[Any] = init_image.resize((768, 512) )
UpperCamelCase_: Union[str, Any] = LMSDiscreteScheduler.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , subfolder="""scheduler""" , revision="""onnx""" )
UpperCamelCase_: Dict = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , revision="""onnx""" , scheduler=snake_case_ , safety_checker=snake_case_ , feature_extractor=snake_case_ , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=snake_case_ )
UpperCamelCase_: Any = """A fantasy landscape, trending on artstation"""
UpperCamelCase_: Dict = np.random.RandomState(0 )
UpperCamelCase_: Union[str, Any] = pipe(
prompt=snake_case_ , image=snake_case_ , strength=0.75 , guidance_scale=7.5 , num_inference_steps=20 , generator=snake_case_ , output_type="""np""" , )
UpperCamelCase_: Optional[int] = output.images
UpperCamelCase_: Dict = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 768, 3)
UpperCamelCase_: Optional[int] = np.array([0.8043, 0.926, 0.9581, 0.8119, 0.8954, 0.913, 0.7209, 0.7463, 0.7431] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
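

# Illustrative usage sketch (not part of the test suite above; the checkpoint and
# revision are the ones the nightly tests use, the remaining values are assumptions):
#
# from diffusers import OnnxStableDiffusionImg2ImgPipeline
# from diffusers.utils import load_image
#
# pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
#     "CompVis/stable-diffusion-v1-4", revision="onnx", provider="CPUExecutionProvider"
# )
# init_image = load_image(
#     "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
#     "/img2img/sketch-mountains-input.jpg"
# ).resize((768, 512))
# result = pipe(prompt="A fantasy landscape, trending on artstation", image=init_image,
#               strength=0.75, guidance_scale=7.5, num_inference_steps=10, output_type="np")
# print(result.images.shape)  # (1, 512, 768, 3)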
| 223 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_a = logging.get_logger(__name__)
_a = {
'google/realm-cc-news-pretrained-embedder': (
'https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json'
),
'google/realm-cc-news-pretrained-encoder': (
'https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json'
),
'google/realm-cc-news-pretrained-scorer': (
'https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json'
),
'google/realm-cc-news-pretrained-openqa': (
        'https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/config.json'
),
'google/realm-orqa-nq-openqa': 'https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json',
'google/realm-orqa-nq-reader': 'https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json',
'google/realm-orqa-wq-openqa': 'https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json',
'google/realm-orqa-wq-reader': 'https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json',
# See all REALM models at https://huggingface.co/models?filter=realm
}
class A_ (PretrainedConfig ):
    '''simple docstring'''

    model_type = """realm"""

    def __init__( self , vocab_size=30522 , hidden_size=768 , retriever_proj_size=128 , num_hidden_layers=12 , num_attention_heads=12 , num_candidates=8 , intermediate_size=3072 , hidden_act="gelu_new" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1E-12 , span_hidden_size=256 , max_span_width=10 , reader_layer_norm_eps=1E-3 , reader_beam_size=5 , reader_seq_len=320 , num_block_records=13353718 , searcher_beam_size=5000 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , **kwargs , ):
        """simple docstring"""
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )

        # Common config
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.retriever_proj_size = retriever_proj_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_candidates = num_candidates
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps

        # Reader config
        self.span_hidden_size = span_hidden_size
        self.max_span_width = max_span_width
        self.reader_layer_norm_eps = reader_layer_norm_eps
        self.reader_beam_size = reader_beam_size
        self.reader_seq_len = reader_seq_len

        # Retrieval config
        self.num_block_records = num_block_records
        self.searcher_beam_size = searcher_beam_size
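

# Usage sketch (illustrative): with the defaults above, an argument-free config
# mirrors the hyper-parameters of the google/realm-* checkpoints.
if __name__ == "__main__":
    realm_config = A_()
    print(realm_config.num_block_records )  # 13353718 pre-indexed evidence blocks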
| 61 |
"""simple docstring"""
import inspect
import unittest
import numpy as np
from transformers import ViTConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel
class FlaxViTModelTester(unittest.TestCase ):
'''simple docstring'''
    def __init__( self , parent , batch_size=13 , image_size=30 , patch_size=2 , num_channels=3 , is_training=True , use_labels=True , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , type_sequence_label_size=10 , initializer_range=0.02 , ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range

        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs( self ):
        """simple docstring"""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        config = ViTConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=False , initializer_range=self.initializer_range , )
        return config, pixel_values
    def create_and_check_model( self , config , pixel_values ):
        """simple docstring"""
        model = FlaxViTModel(config=config )
        result = model(pixel_values )
        # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token)
        image_size = (self.image_size, self.image_size)
        patch_size = (self.patch_size, self.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, num_patches + 1, self.hidden_size) )
    def create_and_check_for_image_classification( self , config , pixel_values ):
        """simple docstring"""
        config.num_labels = self.type_sequence_label_size
        model = FlaxViTForImageClassification(config=config )
        result = model(pixel_values )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )

        # test greyscale images
        config.num_channels = 1
        model = FlaxViTForImageClassification(config )
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        result = model(pixel_values )
    def prepare_config_and_inputs_for_common( self ):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (config, pixel_values) = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_flax
class FlaxViTModelTest(FlaxModelTesterMixin ,unittest.TestCase ):
    '''simple docstring'''
    all_model_classes = (FlaxViTModel, FlaxViTForImageClassification) if is_flax_available() else ()
    def setUp( self ):
        """simple docstring"""
        self.model_tester = FlaxViTModelTester(self )
        self.config_tester = ConfigTester(self , config_class=ViTConfig , has_text_modality=False , hidden_size=37 )
    def test_config( self ):
        """simple docstring"""
        self.config_tester.run_common_tests()

    def test_model( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )

    def test_for_image_classification( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )

    def test_forward_signature( self ):
        """simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.__call__ )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1] , expected_arg_names )

    def test_jit_compilation( self ):
        """simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict , model_class )
                model = model_class(config )

                @jax.jit
                def model_jitted(pixel_values , **kwargs ):
                    return model(pixel_values=pixel_values , **kwargs )

                with self.subTest("JIT Enabled" ):
                    jitted_outputs = model_jitted(**prepared_inputs_dict ).to_tuple()

                with self.subTest("JIT Disabled" ):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict ).to_tuple()

                self.assertEqual(len(jitted_outputs ) , len(outputs ) )
                for jitted_output, output in zip(jitted_outputs , outputs ):
                    self.assertEqual(jitted_output.shape , output.shape )

    @slow
    def test_model_from_pretrained( self ):
        """simple docstring"""
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("google/vit-base-patch16-224" )
            outputs = model(np.ones((1, 3, 224, 224) ) )
            self.assertIsNotNone(outputs )
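

# Worked example of the sequence-length bookkeeping used above (illustrative):
# a 30x30 image with 2x2 patches yields (30 // 2) ** 2 = 225 patches, plus one
# prepended [CLS] token, giving the 226-token sequence the tester expects.
def _expected_seq_length(image_size: int , patch_size: int ) -> int:
    return (image_size // patch_size) ** 2 + 1


assert _expected_seq_length(30 , 2 ) == 226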
| 61 | 1 |
'''simple docstring'''
import tempfile
import unittest
from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch
TINY_BART = '''sshleifer/bart-tiny-random'''
TINY_T5 = '''patrickvonplaten/t5-tiny-random'''
@require_torch
class lowercase_ ( unittest.TestCase ):
"""simple docstring"""
    @cached_property
    def teacher_config( self ):
        """simple docstring"""
        return AutoConfig.from_pretrained(TINY_BART )

    def test_valid_t5( self ):
        """simple docstring"""
        student, *_ = create_student_by_copying_alternating_layers(TINY_T5 , tempfile.mkdtemp() , e=1 , d=1 )
        self.assertEqual(student.config.num_hidden_layers , 1 )

    def test_asymmetric_t5( self ):
        """simple docstring"""
        student, *_ = create_student_by_copying_alternating_layers(TINY_T5 , tempfile.mkdtemp() , e=1 , d=None )

    def test_same_decoder_small_encoder( self ):
        """simple docstring"""
        student, *_ = create_student_by_copying_alternating_layers(TINY_BART , tempfile.mkdtemp() , e=1 , d=None )
        self.assertEqual(student.config.encoder_layers , 1 )
        self.assertEqual(student.config.decoder_layers , self.teacher_config.encoder_layers )

    def test_small_enc_small_dec( self ):
        """simple docstring"""
        student, *_ = create_student_by_copying_alternating_layers(TINY_BART , tempfile.mkdtemp() , e=1 , d=1 )
        self.assertEqual(student.config.encoder_layers , 1 )
        self.assertEqual(student.config.decoder_layers , 1 )

    def test_raises_assert( self ):
        """simple docstring"""
        with self.assertRaises(AssertionError ):
            create_student_by_copying_alternating_layers(TINY_BART , tempfile.mkdtemp() , e=None , d=None )
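

# Sketch of the layer-selection idea these tests exercise (an approximation: the
# real make_student uses a curated lookup table, not this even-spacing rule):
def _evenly_spaced_layers(n_student: int , n_teacher: int ) -> list:
    step = n_teacher / n_student
    return [round(step * i ) for i in range(n_student )]


# e.g. distilling a 12-layer teacher to 3 layers copies teacher layers [0, 4, 8]
assert _evenly_spaced_layers(3 , 12 ) == [0, 4, 8]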
| 358 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'configuration_bridgetower': [
'BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'BridgeTowerConfig',
'BridgeTowerTextConfig',
'BridgeTowerVisionConfig',
],
'processing_bridgetower': ['BridgeTowerProcessor'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['image_processing_bridgetower'] = ['BridgeTowerImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_bridgetower'] = [
'BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST',
'BridgeTowerForContrastiveLearning',
'BridgeTowerForImageAndTextRetrieval',
'BridgeTowerForMaskedLM',
'BridgeTowerModel',
'BridgeTowerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_bridgetower import (
BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP,
BridgeTowerConfig,
BridgeTowerTextConfig,
BridgeTowerVisionConfig,
)
from .processing_bridgetower import BridgeTowerProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_bridgetower import BridgeTowerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bridgetower import (
BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST,
BridgeTowerForContrastiveLearning,
BridgeTowerForImageAndTextRetrieval,
BridgeTowerForMaskedLM,
BridgeTowerModel,
BridgeTowerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
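    # Note (illustrative): _LazyModule defers the heavy torch/vision imports until
    # a name from _import_structure is first accessed, so importing this package
    # stays cheap; the TYPE_CHECKING branch above gives static analyzers real names.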
| 111 | 0 |
'''simple docstring'''
import gc
import unittest
from parameterized import parameterized
from diffusers import FlaxUNetaDConditionModel
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
@slow
@require_flax
class lowerCAmelCase__ ( unittest.TestCase ):
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return F'''gaussian_noise_s={seed}_shape={'_'.join([str(__SCREAMING_SNAKE_CASE ) for s in shape] )}.npy'''
def _snake_case ( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
def _snake_case ( self , __SCREAMING_SNAKE_CASE=0 , __SCREAMING_SNAKE_CASE=(4, 4, 64, 64) , __SCREAMING_SNAKE_CASE=False ):
"""simple docstring"""
lowercase_ : Any = jnp.bfloataa if fpaa else jnp.floataa
lowercase_ : int = jnp.array(load_hf_numpy(self.get_file_format(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ) , dtype=__SCREAMING_SNAKE_CASE )
return image
def _snake_case ( self , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE="CompVis/stable-diffusion-v1-4" ):
"""simple docstring"""
lowercase_ : Union[str, Any] = jnp.bfloataa if fpaa else jnp.floataa
lowercase_ : Union[str, Any] = '''bf16''' if fpaa else None
lowercase_ , lowercase_ : Dict = FlaxUNetaDConditionModel.from_pretrained(
__SCREAMING_SNAKE_CASE , subfolder='''unet''' , dtype=__SCREAMING_SNAKE_CASE , revision=__SCREAMING_SNAKE_CASE )
return model, params
def _snake_case ( self , __SCREAMING_SNAKE_CASE=0 , __SCREAMING_SNAKE_CASE=(4, 77, 7_68) , __SCREAMING_SNAKE_CASE=False ):
"""simple docstring"""
lowercase_ : Optional[Any] = jnp.bfloataa if fpaa else jnp.floataa
lowercase_ : List[str] = jnp.array(load_hf_numpy(self.get_file_format(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ) , dtype=__SCREAMING_SNAKE_CASE )
return hidden_states
@parameterized.expand(
[
# fmt: off
[83, 4, [-0.2_323, -0.1_304, 0.0_813, -0.3_093, -0.0_919, -0.1_571, -0.1_125, -0.5_806]],
[17, 0.55, [-0.0_831, -0.2_443, 0.0_901, -0.0_919, 0.3_396, 0.0_103, -0.3_743, 0.0_701]],
[8, 0.89, [-0.4_863, 0.0_859, 0.0_875, -0.1_658, 0.9_199, -0.0_114, 0.4_839, 0.4_639]],
[3, 10_00, [-0.5_649, 0.2_402, -0.5_518, 0.1_248, 1.1_328, -0.2_443, -0.0_325, -1.0_078]],
# fmt: on
] )
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase_ , lowercase_ : Tuple = self.get_unet_model(model_id='''CompVis/stable-diffusion-v1-4''' , fpaa=__SCREAMING_SNAKE_CASE )
lowercase_ : Any = self.get_latents(__SCREAMING_SNAKE_CASE , fpaa=__SCREAMING_SNAKE_CASE )
lowercase_ : Optional[Any] = self.get_encoder_hidden_states(__SCREAMING_SNAKE_CASE , fpaa=__SCREAMING_SNAKE_CASE )
lowercase_ : Optional[Any] = model.apply(
{'''params''': params} , __SCREAMING_SNAKE_CASE , jnp.array(__SCREAMING_SNAKE_CASE , dtype=jnp.intaa ) , encoder_hidden_states=__SCREAMING_SNAKE_CASE , ).sample
assert sample.shape == latents.shape
lowercase_ : Dict = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa )
lowercase_ : Tuple = jnp.array(__SCREAMING_SNAKE_CASE , dtype=jnp.floataa )
        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
assert jnp.allclose(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , atol=1E-2 )
@parameterized.expand(
[
# fmt: off
[83, 4, [0.1_514, 0.0_807, 0.1_624, 0.1_016, -0.1_896, 0.0_263, 0.0_677, 0.2_310]],
[17, 0.55, [0.1_164, -0.0_216, 0.0_170, 0.1_589, -0.3_120, 0.1_005, -0.0_581, -0.1_458]],
[8, 0.89, [-0.1_758, -0.0_169, 0.1_004, -0.1_411, 0.1_312, 0.1_103, -0.1_996, 0.2_139]],
[3, 10_00, [0.1_214, 0.0_352, -0.0_731, -0.1_562, -0.0_994, -0.0_906, -0.2_340, -0.0_539]],
# fmt: on
] )
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase_ , lowercase_ : Union[str, Any] = self.get_unet_model(model_id='''stabilityai/stable-diffusion-2''' , fpaa=__SCREAMING_SNAKE_CASE )
lowercase_ : List[Any] = self.get_latents(__SCREAMING_SNAKE_CASE , shape=(4, 4, 96, 96) , fpaa=__SCREAMING_SNAKE_CASE )
lowercase_ : Optional[Any] = self.get_encoder_hidden_states(__SCREAMING_SNAKE_CASE , shape=(4, 77, 10_24) , fpaa=__SCREAMING_SNAKE_CASE )
lowercase_ : Optional[Any] = model.apply(
{'''params''': params} , __SCREAMING_SNAKE_CASE , jnp.array(__SCREAMING_SNAKE_CASE , dtype=jnp.intaa ) , encoder_hidden_states=__SCREAMING_SNAKE_CASE , ).sample
assert sample.shape == latents.shape
lowercase_ : Optional[int] = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa )
lowercase_ : List[Any] = jnp.array(__SCREAMING_SNAKE_CASE , dtype=jnp.floataa )
# Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
assert jnp.allclose(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , atol=1E-2 )
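

# The comparison pattern above, standalone (illustrative numbers): hard-coded
# reference slices are checked with a loose atol because bfloat16 kernels are
# not bit-for-bit reproducible across accelerators.
import numpy as _np

_sample = _np.array([-0.2324, -0.1305] )
_expected = _np.array([-0.2323, -0.1304] )
assert _np.abs(_sample - _expected ).max() < 1E-2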
| 93 |
"""simple docstring"""
def merge_sort( collection: list ) -> list:
    '''simple docstring'''
    start, end = [], []
    while len(collection) > 1:
        min_one, max_one = min(collection), max(collection)
        start.append(min_one)
        end.append(max_one)
        collection.remove(min_one)
        collection.remove(max_one)
    end.reverse()
    return start + collection + end
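

# Note: despite the name, this is a min/max selection sort rather than merge
# sort: each pass scans the remaining items for their minimum and maximum, so
# it makes O(n^2) comparisons overall.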
if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item) for item in user_input.split(',')]
print(*merge_sort(unsorted), sep=',')
| 17 | 0 |
'''simple docstring'''
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class SCREAMING_SNAKE_CASE__ ( ProcessorMixin ):
    """simple docstring"""
    attributes = ['''image_processor''', '''tokenizer''']
    image_processor_class = '''Pix2StructImageProcessor'''
    tokenizer_class = ('''T5Tokenizer''', '''T5TokenizerFast''')

    def __init__( self , image_processor , tokenizer ):
        """simple docstring"""
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor , tokenizer )

    def __call__( self , images=None , text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , add_special_tokens: bool = True , padding: Union[bool, str, PaddingStrategy] = False , truncation: Union[bool, str, TruncationStrategy] = None , max_length: Optional[int] = None , max_patches: Optional[int] = 2048 , stride: int = 0 , pad_to_multiple_of: Optional[int] = None , return_attention_mask: Optional[bool] = None , return_overflowing_tokens: bool = False , return_special_tokens_mask: bool = False , return_offsets_mapping: bool = False , return_token_type_ids: bool = False , return_length: bool = False , verbose: bool = True , return_tensors: Optional[Union[str, TensorType]] = None , **kwargs , ) -> BatchEncoding:
        """simple docstring"""
        if images is None and text is None:
            raise ValueError("You have to specify either images or text." )

        # Get only text
        if images is None and not self.image_processor.is_vqa:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_token_type_ids=return_token_type_ids , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
            return text_encoding

        if not self.image_processor.is_vqa:
            # add pixel_values
            encoding_image_processor = self.image_processor(
                images , return_tensors=return_tensors , max_patches=max_patches , **kwargs )
        else:
            # add pixel_values and bbox
            encoding_image_processor = self.image_processor(
                images , return_tensors=return_tensors , max_patches=max_patches , header_text=text , **kwargs )

        if text is not None and not self.image_processor.is_vqa:
            text_encoding = self.tokenizer(
                text=text , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_token_type_ids=return_token_type_ids , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )

            if "attention_mask" in text_encoding:
                text_encoding["decoder_attention_mask"] = text_encoding.pop("attention_mask" )
            if "input_ids" in text_encoding:
                text_encoding["decoder_input_ids"] = text_encoding.pop("input_ids" )
        else:
            text_encoding = None

        if text_encoding is not None:
            encoding_image_processor.update(text_encoding )

        return encoding_image_processor

    def batch_decode( self , *args , **kwargs ):
        """simple docstring"""
        return self.tokenizer.batch_decode(*args , **kwargs )

    def decode( self , *args , **kwargs ):
        """simple docstring"""
        return self.tokenizer.decode(*args , **kwargs )

    @property
    def model_input_names( self ):
        """simple docstring"""
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
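

# Minimal usage sketch (illustrative; the checkpoint name is an assumption): the
# processor routes images to the Pix2Struct image processor and text to the T5
# tokenizer, renaming the text tensors to decoder_* for the generation API.
# from transformers import Pix2StructProcessor
# processor = Pix2StructProcessor.from_pretrained("google/pix2struct-textcaps-base")
# batch = processor(images=image, text="A caption", return_tensors="pt")
# print(batch.keys())  # image features plus decoder_input_ids / decoder_attention_mask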
| 351 |
'''simple docstring'''
def lucas_lehmer_test( p: int ) -> bool:
    if p < 2:
        raise ValueError("p should not be less than 2!" )
    elif p == 2:
        return True

    s = 4
    m = (1 << p) - 1
    for _ in range(p - 2 ):
        s = ((s * s) - 2) % m
    return s == 0
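

# Worked check (illustrative): for p = 5, M_5 = 2**5 - 1 = 31 and the sequence
# runs 4 -> 14 -> 8 -> 0 (mod 31), so the test reports M_5 prime, as expected.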
if __name__ == "__main__":
print(lucas_lehmer_test(7))
print(lucas_lehmer_test(11))
| 37 | 0 |
"""simple docstring"""
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401
from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401
deprecate(
'stable diffusion controlnet',
'0.22.0',
'Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.',
standard_warn=False,
stacklevel=3,
)
| 98 |
import math
import time
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class A__(a_ ):
"""simple docstring"""
def __init__( self , *_lowercase , _lowercase=None , _lowercase=None , **_lowercase ) -> Optional[Any]:
super().__init__(*_lowercase , **_lowercase )
a_ : Optional[int] = eval_examples
a_ : Tuple = post_process_function
def UpperCamelCase__ ( self , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase = "eval" ) -> Union[str, Any]:
a_ : List[str] = self.eval_dataset if eval_dataset is None else eval_dataset
a_ : List[str] = self.get_eval_dataloader(_lowercase )
a_ : List[Any] = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
a_ : Optional[int] = self.compute_metrics
a_ : List[str] = None
a_ : Tuple = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
a_ : Any = time.time()
try:
a_ : Union[str, Any] = eval_loop(
_lowercase , description="""Evaluation""" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=_lowercase , metric_key_prefix=_lowercase , )
finally:
a_ : Dict = compute_metrics
a_ : Union[str, Any] = self.args.eval_batch_size * self.args.world_size
if F'''{metric_key_prefix}_jit_compilation_time''' in output.metrics:
start_time += output.metrics[F'''{metric_key_prefix}_jit_compilation_time''']
output.metrics.update(
speed_metrics(
_lowercase , _lowercase , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
# Only the main node write the results by default
a_ : List[Any] = self.post_process_function(_lowercase , _lowercase , output.predictions )
a_ : Optional[Any] = self.compute_metrics(_lowercase )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(F'''{metric_key_prefix}_''' ):
a_ : List[str] = metrics.pop(_lowercase )
metrics.update(output.metrics )
else:
a_ : List[Any] = output.metrics
if self.args.should_log:
# Only the main node log the results by default
self.log(_lowercase )
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report() )
a_ : List[Any] = self.callback_handler.on_evaluate(self.args , self.state , self.control , _lowercase )
return metrics
def UpperCamelCase__ ( self , _lowercase , _lowercase , _lowercase=None , _lowercase = "test" ) -> str:
a_ : Tuple = self.get_test_dataloader(_lowercase )
# Temporarily disable metric computation, we will do it in the loop here.
a_ : List[Any] = self.compute_metrics
a_ : int = None
a_ : Optional[Any] = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
a_ : Union[str, Any] = time.time()
try:
a_ : List[str] = eval_loop(
_lowercase , description="""Prediction""" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=_lowercase , metric_key_prefix=_lowercase , )
finally:
a_ : Optional[Any] = compute_metrics
a_ : Union[str, Any] = self.args.eval_batch_size * self.args.world_size
if F'''{metric_key_prefix}_jit_compilation_time''' in output.metrics:
start_time += output.metrics[F'''{metric_key_prefix}_jit_compilation_time''']
output.metrics.update(
speed_metrics(
_lowercase , _lowercase , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is None or self.compute_metrics is None:
return output
a_ : Optional[int] = self.post_process_function(_lowercase , _lowercase , output.predictions , """predict""" )
a_ : List[Any] = self.compute_metrics(_lowercase )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(F'''{metric_key_prefix}_''' ):
a_ : int = metrics.pop(_lowercase )
metrics.update(output.metrics )
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=_lowercase )
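

# Shape of the metric post-processing above, in miniature (illustrative values):
# every computed metric is re-keyed with the metric_key_prefix so downstream
# logging can tell evaluation numbers from prediction numbers,
# e.g. {"exact_match": 81.2} -> {"eval_exact_match": 81.2}.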
| 248 | 0 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowerCamelCase = logging.get_logger(__name__)
_lowerCamelCase = {
'facebook/data2vec-text-base': 'https://huggingface.co/data2vec/resolve/main/config.json',
}
class DataaVecTextConfig ( PretrainedConfig ):
    '''simple docstring'''
    model_type = 'data2vec-text'

    def __init__( self , vocab_size=30522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1E-12 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , position_embedding_type="absolute" , use_cache=True , classifier_dropout=None , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class DataaVecTextOnnxConfig ( OnnxConfig ):
    '''simple docstring'''
    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
        else:
            dynamic_axis = {0: '''batch''', 1: '''sequence'''}
        return OrderedDict(
            [
                ('''input_ids''', dynamic_axis),
                ('''attention_mask''', dynamic_axis),
            ] )
| 368 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import TransformeraDModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel
from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings
from diffusers.utils import load_numpy, slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
_lowerCamelCase = False
class a ( unittest.TestCase ):
'''simple docstring'''
def lowerCamelCase_ ( self : str ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def lowerCamelCase_ ( self : Any ):
return 12
@property
def lowerCamelCase_ ( self : Union[str, Any] ):
return 12
@property
def lowerCamelCase_ ( self : Optional[Any] ):
return 32
@property
def lowerCamelCase_ ( self : List[Any] ):
torch.manual_seed(0 )
UpperCAmelCase_ = VQModel(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=3 , num_vq_embeddings=self.num_embed , vq_embed_dim=3 , )
return model
@property
def lowerCamelCase_ ( self : Tuple ):
UpperCAmelCase_ = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
return tokenizer
@property
def lowerCamelCase_ ( self : int ):
torch.manual_seed(0 )
UpperCAmelCase_ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
return CLIPTextModel(__snake_case )
@property
def lowerCamelCase_ ( self : Union[str, Any] ):
torch.manual_seed(0 )
UpperCAmelCase_ = 12
UpperCAmelCase_ = 12
UpperCAmelCase_ = {
'''attention_bias''': True,
'''cross_attention_dim''': 32,
'''attention_head_dim''': height * width,
'''num_attention_heads''': 1,
'''num_vector_embeds''': self.num_embed,
'''num_embeds_ada_norm''': self.num_embeds_ada_norm,
'''norm_num_groups''': 32,
'''sample_size''': width,
'''activation_fn''': '''geglu-approximate''',
}
UpperCAmelCase_ = TransformeraDModel(**__snake_case )
return model
def lowerCamelCase_ ( self : int ):
UpperCAmelCase_ = '''cpu'''
UpperCAmelCase_ = self.dummy_vqvae
UpperCAmelCase_ = self.dummy_text_encoder
UpperCAmelCase_ = self.dummy_tokenizer
UpperCAmelCase_ = self.dummy_transformer
UpperCAmelCase_ = VQDiffusionScheduler(self.num_embed )
UpperCAmelCase_ = LearnedClassifierFreeSamplingEmbeddings(learnable=__snake_case )
UpperCAmelCase_ = VQDiffusionPipeline(
vqvae=__snake_case , text_encoder=__snake_case , tokenizer=__snake_case , transformer=__snake_case , scheduler=__snake_case , learned_classifier_free_sampling_embeddings=__snake_case , )
UpperCAmelCase_ = pipe.to(__snake_case )
pipe.set_progress_bar_config(disable=__snake_case )
UpperCAmelCase_ = '''teddy bear playing in the pool'''
UpperCAmelCase_ = torch.Generator(device=__snake_case ).manual_seed(0 )
UpperCAmelCase_ = pipe([prompt] , generator=__snake_case , num_inference_steps=2 , output_type='''np''' )
UpperCAmelCase_ = output.images
UpperCAmelCase_ = torch.Generator(device=__snake_case ).manual_seed(0 )
UpperCAmelCase_ = pipe(
[prompt] , generator=__snake_case , output_type='''np''' , return_dict=__snake_case , num_inference_steps=2 )[0]
UpperCAmelCase_ = image[0, -3:, -3:, -1]
UpperCAmelCase_ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 24, 24, 3)
UpperCAmelCase_ = np.array([0.6_551, 0.6_168, 0.5_008, 0.5_676, 0.5_659, 0.4_295, 0.6_073, 0.5_599, 0.4_992] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def lowerCamelCase_ ( self : str ):
UpperCAmelCase_ = '''cpu'''
UpperCAmelCase_ = self.dummy_vqvae
UpperCAmelCase_ = self.dummy_text_encoder
UpperCAmelCase_ = self.dummy_tokenizer
UpperCAmelCase_ = self.dummy_transformer
UpperCAmelCase_ = VQDiffusionScheduler(self.num_embed )
UpperCAmelCase_ = LearnedClassifierFreeSamplingEmbeddings(
learnable=__snake_case , hidden_size=self.text_embedder_hidden_size , length=tokenizer.model_max_length )
UpperCAmelCase_ = VQDiffusionPipeline(
vqvae=__snake_case , text_encoder=__snake_case , tokenizer=__snake_case , transformer=__snake_case , scheduler=__snake_case , learned_classifier_free_sampling_embeddings=__snake_case , )
UpperCAmelCase_ = pipe.to(__snake_case )
pipe.set_progress_bar_config(disable=__snake_case )
UpperCAmelCase_ = '''teddy bear playing in the pool'''
UpperCAmelCase_ = torch.Generator(device=__snake_case ).manual_seed(0 )
UpperCAmelCase_ = pipe([prompt] , generator=__snake_case , num_inference_steps=2 , output_type='''np''' )
UpperCAmelCase_ = output.images
UpperCAmelCase_ = torch.Generator(device=__snake_case ).manual_seed(0 )
UpperCAmelCase_ = pipe(
[prompt] , generator=__snake_case , output_type='''np''' , return_dict=__snake_case , num_inference_steps=2 )[0]
UpperCAmelCase_ = image[0, -3:, -3:, -1]
UpperCAmelCase_ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 24, 24, 3)
UpperCAmelCase_ = np.array([0.6_693, 0.6_075, 0.4_959, 0.5_701, 0.5_583, 0.4_333, 0.6_171, 0.5_684, 0.4_988] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2.0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class a ( unittest.TestCase ):
'''simple docstring'''
def lowerCamelCase_ ( self : Optional[int] ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase_ ( self : Optional[int] ):
UpperCAmelCase_ = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy''' )
UpperCAmelCase_ = VQDiffusionPipeline.from_pretrained('''microsoft/vq-diffusion-ithq''' )
UpperCAmelCase_ = pipeline.to(__snake_case )
pipeline.set_progress_bar_config(disable=__snake_case )
# requires GPU generator for gumbel softmax
# don't use GPU generator in tests though
UpperCAmelCase_ = torch.Generator(device=__snake_case ).manual_seed(0 )
UpperCAmelCase_ = pipeline(
'''teddy bear playing in the pool''' , num_images_per_prompt=1 , generator=__snake_case , output_type='''np''' , )
UpperCAmelCase_ = output.images[0]
assert image.shape == (2_56, 2_56, 3)
assert np.abs(expected_image - image ).max() < 2.0
| 177 | 0 |
'''simple docstring'''
from random import randint, random
def construct_highway( number_of_cells , frequency , initial_speed , random_frequency = False , random_speed = False , max_speed = 5 , ):
    highway = [[-1] * number_of_cells]  # Create a highway without any car
    i = 0
    initial_speed = max(initial_speed , 0 )
    while i < number_of_cells:
        highway[0][i] = (
            randint(0 , max_speed ) if random_speed else initial_speed
        )  # Place the cars
        i += (
            randint(1 , max_speed * 2 ) if random_frequency else frequency
        )  # Arbitrary number, may need tuning
    return highway


def get_distance( highway_now , car_index ):
    distance = 0
    cells = highway_now[car_index + 1 :]
    for cell in range(len(cells ) ):  # May need a better name for this
        if cells[cell] != -1:  # If the cell is not empty then
            return distance  # we have the distance we wanted
        distance += 1
    # Here if the car is near the end of the highway
    return distance + get_distance(highway_now , -1 )


def update( highway_now , probability , max_speed ):
    number_of_cells = len(highway_now )
    # Before calculations, the highway is empty
    next_highway = [-1] * number_of_cells

    for car_index in range(number_of_cells ):
        if highway_now[car_index] != -1:
            # Add 1 to the current speed of the car and cap the speed
            next_highway[car_index] = min(highway_now[car_index] + 1 , max_speed )
            # Number of empty cells before the next car
            distance_to_next = get_distance(highway_now , car_index ) - 1
            # We can't have the car causing an accident
            next_highway[car_index] = min(next_highway[car_index] , distance_to_next )
            if random() < probability:
                # Randomly, a driver will slow down
                next_highway[car_index] = max(next_highway[car_index] - 1 , 0 )
    return next_highway


def simulate( highway , number_of_update , probability , max_speed ):
    number_of_cells = len(highway[0] )

    for i in range(number_of_update ):
        next_speeds_calculated = update(highway[i] , probability , max_speed )
        real_next_speeds = [-1] * number_of_cells

        for car_index in range(number_of_cells ):
            speed = next_speeds_calculated[car_index]
            if speed != -1:
                # Change the position based on the speed (with % to create the loop)
                index = (car_index + speed) % number_of_cells
                # Commit the change of position
                real_next_speeds[index] = speed
        highway.append(real_next_speeds )
    return highway
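

def _demo_single_car() -> list:
    # Illustrative (not called by default): with probability 0.0 random braking
    # never fires, so a lone car accelerates by one cell per step up to max_speed.
    demo_highway = construct_highway(6 , 6 , 0 )
    return simulate(demo_highway , 4 , 0.0 , 5 )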
if __name__ == "__main__":
import doctest
doctest.testmod()
| 298 |
from __future__ import annotations
from PIL import Image
# Define glider example
GLIDER = [
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
]
# Define blinker example
BLINKER = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]
def new_generation( cells: list[list[int]] ) -> list[list[int]]:
    """simple docstring"""
    next_generation = []
    for i in range(len(cells ) ):
        next_generation_row = []
        for j in range(len(cells[i] ) ):
            # Get the number of live neighbours
            neighbour_count = 0
            if i > 0 and j > 0:
                neighbour_count += cells[i - 1][j - 1]
            if i > 0:
                neighbour_count += cells[i - 1][j]
            if i > 0 and j < len(cells[i] ) - 1:
                neighbour_count += cells[i - 1][j + 1]
            if j > 0:
                neighbour_count += cells[i][j - 1]
            if j < len(cells[i] ) - 1:
                neighbour_count += cells[i][j + 1]
            if i < len(cells ) - 1 and j > 0:
                neighbour_count += cells[i + 1][j - 1]
            if i < len(cells ) - 1:
                neighbour_count += cells[i + 1][j]
            if i < len(cells ) - 1 and j < len(cells[i] ) - 1:
                neighbour_count += cells[i + 1][j + 1]

            # Rules of the game of life (excerpt from Wikipedia):
            # 1. Any live cell with two or three live neighbours survives.
            # 2. Any dead cell with three live neighbours becomes a live cell.
            # 3. All other live cells die in the next generation.
            #    Similarly, all other dead cells stay dead.
            alive = cells[i][j] == 1
            if (
                (alive and 2 <= neighbour_count <= 3)
                or not alive
                and neighbour_count == 3
            ):
                next_generation_row.append(1 )
            else:
                next_generation_row.append(0 )
        next_generation.append(next_generation_row )
    return next_generation


def generate_images( cells: list[list[int]] , frames: int ) -> list[Image.Image]:
    """simple docstring"""
    images = []
    for _ in range(frames ):
        # Create output image
        img = Image.new("""RGB""" , (len(cells[0] ), len(cells )) )
        pixels = img.load()

        # Save cells to image
        for x in range(len(cells ) ):
            for y in range(len(cells[0] ) ):
                colour = 2_5_5 - cells[y][x] * 2_5_5
                pixels[x, y] = (colour, colour, colour)

        # Save image
        images.append(img )
        cells = new_generation(cells )
    return images
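

def _demo_blinker() -> None:
    # Illustrative: the period-2 "blinker" flips between a vertical and a
    # horizontal bar of three live cells under the rules implemented above.
    assert new_generation([[0, 1, 0], [0, 1, 0], [0, 1, 0]] ) == [[0, 0, 0], [1, 1, 1], [0, 0, 0]]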
if __name__ == "__main__":
    images = generate_images(GLIDER, 1_6)
images[0].save('''out.gif''', save_all=True, append_images=images[1:])
| 274 | 0 |
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCamelCase : Tuple = logging.get_logger(__name__)
def create_rename_keys( config , base_model=False ):
    """simple docstring"""
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"""blocks.{i}.norm1.weight""", f"""deit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((f"""blocks.{i}.norm1.bias""", f"""deit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append((f"""blocks.{i}.attn.proj.weight""", f"""deit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((f"""blocks.{i}.attn.proj.bias""", f"""deit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((f"""blocks.{i}.norm2.weight""", f"""deit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((f"""blocks.{i}.norm2.bias""", f"""deit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((f"""blocks.{i}.mlp.fc1.weight""", f"""deit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((f"""blocks.{i}.mlp.fc1.bias""", f"""deit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((f"""blocks.{i}.mlp.fc2.weight""", f"""deit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((f"""blocks.{i}.mlp.fc2.bias""", f"""deit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
('''cls_token''', '''deit.embeddings.cls_token'''),
('''dist_token''', '''deit.embeddings.distillation_token'''),
('''patch_embed.proj.weight''', '''deit.embeddings.patch_embeddings.projection.weight'''),
('''patch_embed.proj.bias''', '''deit.embeddings.patch_embeddings.projection.bias'''),
('''pos_embed''', '''deit.embeddings.position_embeddings'''),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('''norm.weight''', '''layernorm.weight'''),
('''norm.bias''', '''layernorm.bias'''),
('''pre_logits.fc.weight''', '''pooler.dense.weight'''),
('''pre_logits.fc.bias''', '''pooler.dense.bias'''),
] )
# if just the base model, we should remove "deit" from all keys that start with "deit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith('''deit''' ) else pair for pair in rename_keys]
else:
# layernorm + classification heads
rename_keys.extend(
[
('''norm.weight''', '''deit.layernorm.weight'''),
('''norm.bias''', '''deit.layernorm.bias'''),
('''head.weight''', '''cls_classifier.weight'''),
('''head.bias''', '''cls_classifier.bias'''),
('''head_dist.weight''', '''distillation_classifier.weight'''),
('''head_dist.bias''', '''distillation_classifier.bias'''),
] )
return rename_keys
def read_in_q_k_v( state_dict , config , base_model=False ):
    """simple docstring"""
    for i in range(config.num_hidden_layers ):
        if base_model:
            prefix = ''''''
        else:
            prefix = '''deit.'''
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"""blocks.{i}.attn.qkv.weight""" )
        in_proj_bias = state_dict.pop(f"""blocks.{i}.attn.qkv.bias""" )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"""{prefix}encoder.layer.{i}.attention.attention.query.weight"""] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"""{prefix}encoder.layer.{i}.attention.attention.query.bias"""] = in_proj_bias[: config.hidden_size]
        state_dict[f"""{prefix}encoder.layer.{i}.attention.attention.key.weight"""] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"""{prefix}encoder.layer.{i}.attention.attention.key.bias"""] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"""{prefix}encoder.layer.{i}.attention.attention.value.weight"""] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"""{prefix}encoder.layer.{i}.attention.attention.value.bias"""] = in_proj_bias[-config.hidden_size :]
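

def _qkv_split_sketch() -> None:
    # Illustrative toy check of the split above (assumption: hidden_size=4): timm
    # fuses q, k and v into one (3*hidden, hidden) projection; HF wants three slices.
    hidden = 4
    fused = torch.arange(3 * hidden * hidden , dtype=torch.float32 ).reshape(3 * hidden , hidden )
    q, k, v = fused[:hidden, :], fused[hidden : 2 * hidden, :], fused[-hidden:, :]
    assert q.shape == k.shape == v.shape == (hidden, hidden)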
def rename_key( dct , old , new ):
    """simple docstring"""
    val = dct.pop(old )
    dct[new] = val
def prepare_img( ):
    """simple docstring"""
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_deit_checkpoint( deit_name , pytorch_dump_folder_path ):
    """simple docstring"""
    config = DeiTConfig()
    # all deit models have fine-tuned heads
    base_model = False
    # dataset (fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1_000
    repo_id = '''huggingface/label-files'''
    filename = '''imagenet-1k-id2label.json'''
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type='''dataset''' ) , '''r''' ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    config.patch_size = int(deit_name[-6:-4] )
    config.image_size = int(deit_name[-3:] )
    # size of the architecture
    if deit_name[9:].startswith('''tiny''' ):
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
    elif deit_name[9:].startswith('''small''' ):
        config.hidden_size = 384
        config.intermediate_size = 1_536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    if deit_name[9:].startswith('''base''' ):
        pass
    elif deit_name[4:].startswith('''large''' ):
        config.hidden_size = 1_024
        config.intermediate_size = 4_096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16

    # load original model from timm
    timm_model = timm.create_model(deit_name , pretrained=True )
    timm_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    rename_keys = create_rename_keys(config , base_model )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    read_in_q_k_v(state_dict , config , base_model )

    # load HuggingFace model
    model = DeiTForImageClassificationWithTeacher(config ).eval()
    model.load_state_dict(state_dict )

    # Check outputs on an image, prepared by DeiTImageProcessor
    size = int(
        (256 / 224) * config.image_size )  # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
    image_processor = DeiTImageProcessor(size=size , crop_size=config.image_size )
    encoding = image_processor(images=prepare_img() , return_tensors='''pt''' )
    pixel_values = encoding['''pixel_values''']
    outputs = model(pixel_values )

    timm_logits = timm_model(pixel_values )
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits , outputs.logits , atol=1E-3 )

    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(f"""Saving model {deit_name} to {pytorch_dump_folder_path}""" )
    model.save_pretrained(pytorch_dump_folder_path )
    print(f"""Saving image processor to {pytorch_dump_folder_path}""" )
    image_processor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--deit_name""",
default="""vit_deit_base_distilled_patch16_224""",
type=str,
help="""Name of the DeiT timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
    args = parser.parse_args()
convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
| 351 |
from __future__ import annotations
from typing import Any
class Matrix:
    '''simple docstring'''
    def __init__( self , row: int , column: int , default_value: float = 0 ) -> None:
        '''simple docstring'''
        self.row, self.column = row, column
        self.array = [[default_value for c in range(column )] for r in range(row )]

    def __str__( self ) -> str:
        '''simple docstring'''
        s = f"""Matrix consists of {self.row} rows and {self.column} columns\n"""

        # Make string identifier
        max_element_length = 0
        for row_vector in self.array:
            for obj in row_vector:
                max_element_length = max(max_element_length , len(str(obj )))
        string_format_identifier = f"""%{max_element_length}s"""

        # Make string and return
        def single_line(row_vector: list[float] ) -> str:
            nonlocal string_format_identifier
            line = '''['''
            line += ", ".join(string_format_identifier % (obj,) for obj in row_vector)
            line += "]"
            return line

        s += "\n".join(single_line(row_vector ) for row_vector in self.array)
        return s

    def __repr__( self ) -> str:
        '''simple docstring'''
        return str(self )

    def validate_indicies( self , loc: tuple[int, int] ) -> bool:
        '''simple docstring'''
        if not (isinstance(loc , (list, tuple)) and len(loc ) == 2):
            return False
        elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
            return False
        else:
            return True

    def __getitem__( self , loc: tuple[int, int] ) -> Any:
        '''simple docstring'''
        assert self.validate_indicies(loc )
        return self.array[loc[0]][loc[1]]

    def __setitem__( self , loc: tuple[int, int] , value: float ) -> None:
        '''simple docstring'''
        assert self.validate_indicies(loc )
        self.array[loc[0]][loc[1]] = value

    def __add__( self , another: Matrix ) -> Matrix:
        '''simple docstring'''
        assert isinstance(another , Matrix )
        assert self.row == another.row and self.column == another.column

        # Add
        result = Matrix(self.row , self.column )
        for r in range(self.row ):
            for c in range(self.column ):
                result[r, c] = self[r, c] + another[r, c]
        return result

    def __neg__( self ) -> Matrix:
        '''simple docstring'''
        result = Matrix(self.row , self.column )
        for r in range(self.row ):
            for c in range(self.column ):
                result[r, c] = -self[r, c]
        return result

    def __sub__( self , another: Matrix ) -> Matrix:
        '''simple docstring'''
        return self + (-another)

    def __mul__( self , another: int | float | Matrix ) -> Matrix:
        '''simple docstring'''
        if isinstance(another , (int, float) ):  # Scalar multiplication
            result = Matrix(self.row , self.column )
            for r in range(self.row ):
                for c in range(self.column ):
                    result[r, c] = self[r, c] * another
            return result
        elif isinstance(another , Matrix ):  # Matrix multiplication
            assert self.column == another.row
            result = Matrix(self.row , another.column )
            for r in range(self.row ):
                for c in range(another.column ):
                    for i in range(self.column ):
                        result[r, c] += self[r, i] * another[i, c]
            return result
        else:
            msg = f"""Unsupported type given for another ({type(another )})"""
            raise TypeError(msg )

    def transpose( self ) -> Matrix:
        '''simple docstring'''
        result = Matrix(self.column , self.row )
        for r in range(self.row ):
            for c in range(self.column ):
                result[c, r] = self[r, c]
        return result

    def sherman_morrison( self , u: Matrix , v: Matrix ) -> Any:
        '''simple docstring'''
        # Sherman-Morrison formula: (A + uv^T)^(-1)
        #   = A^(-1) - (A^(-1) u v^T A^(-1)) / (1 + v^T A^(-1) u),
        # where this matrix plays the role of A^(-1).
        assert isinstance(u , Matrix ) and isinstance(v , Matrix )
        assert self.row == self.column == u.row == v.row  # u, v should be column vector
        assert u.column == v.column == 1  # u, v should be column vector

        # Calculate
        v_t = v.transpose()
        numerator_factor = (v_t * self * u)[0, 0] + 1
        if numerator_factor == 0:
            return None  # It's not invertable
        return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))
# Testing
if __name__ == "__main__":

    def test1() -> None:
        # a^(-1): start from the identity, which is its own inverse
        ainv = Matrix(3, 3, 0)
        for i in range(3):
            ainv[i, i] = 1
        print(f"a^(-1) is {ainv}")
        # u, v
        u = Matrix(3, 1, 0)
        u[0, 0], u[1, 0], u[2, 0] = 1, 2, -3
        v = Matrix(3, 1, 0)
        v[0, 0], v[1, 0], v[2, 0] = 4, -2, 5
        print(f"u is {u}")
        print(f"v is {v}")
        print(f"uv^T is {u * v.transpose()}")
        # Sherman-Morrison
        print(f"(a + uv^T)^(-1) is {ainv.sherman_morrison(u, v)}")

    def test2() -> None:
        import doctest

        doctest.testmod()

    test2()
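
# A quick cross-check of the Sherman-Morrison identity against a direct
# inverse, using NumPy. Illustrative sketch only: NumPy is not a dependency
# of the Matrix class above, and the numbers are arbitrary.
import numpy as np

_a = np.array([[2.0, 0.0, 0.0], [0.0, 3.0, 0.0], [0.0, 0.0, 4.0]])
_u = np.array([[1.0], [2.0], [-3.0]])
_v = np.array([[4.0], [-2.0], [5.0]])

_ainv = np.linalg.inv(_a)
# (A + u v^T)^(-1) = A^(-1) - (A^(-1) u v^T A^(-1)) / (1 + v^T A^(-1) u)
_denominator = 1.0 + (_v.T @ _ainv @ _u)[0, 0]
_sherman_morrison = _ainv - (_ainv @ _u @ _v.T @ _ainv) / _denominator

assert np.allclose(_sherman_morrison, np.linalg.inv(_a + _u @ _v.T))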
import enum
import warnings
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
logger = logging.get_logger(__name__)


class ReturnType(enum.Enum):
    TENSORS = 0
    TEXT = 1


@add_end_docstrings(PIPELINE_INIT_ARGS)
class Text2TextGenerationPipeline(Pipeline):
    """Pipeline for text-to-text generation using seq2seq models."""

    # Used in the return key of the pipeline.
    return_name = "generated"

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        self.check_model_type(
            TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
        )
    def _sanitize_parameters(
        self,
        return_tensors=None,
        return_type=None,
        clean_up_tokenization_spaces=None,
        truncation=None,
        stop_sequence=None,
        **generate_kwargs,
    ):
        preprocess_params = {}
        if truncation is not None:
            preprocess_params["truncation"] = truncation

        forward_params = generate_kwargs

        postprocess_params = {}
        if return_tensors is not None and return_type is None:
            return_type = ReturnType.TENSORS if return_tensors else ReturnType.TEXT
        if return_type is not None:
            postprocess_params["return_type"] = return_type

        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces

        if stop_sequence is not None:
            stop_sequence_ids = self.tokenizer.encode(stop_sequence, add_special_tokens=False)
            if len(stop_sequence_ids) > 1:
                warnings.warn(
                    "Stopping on a multiple token sequence is not yet supported on transformers. The first token of"
                    " the stop sequence will be used as the stop sequence string in the interim."
                )
            generate_kwargs["eos_token_id"] = stop_sequence_ids[0]

        return preprocess_params, forward_params, postprocess_params
    def check_inputs(self, input_length: int, min_length: int, max_length: int):
        """Checks whether there might be something wrong with the given input lengths."""
        return True

    def _parse_and_tokenize(self, *args, truncation):
        prefix = self.model.config.prefix if self.model.config.prefix is not None else ""
        if isinstance(args[0], list):
            if self.tokenizer.pad_token_id is None:
                raise ValueError("Please make sure that the tokenizer has a pad_token_id when using a batch input")
            args = ([prefix + arg for arg in args[0]],)
            padding = True

        elif isinstance(args[0], str):
            args = (prefix + args[0],)
            padding = False
        else:
            raise ValueError(
                f" `args[0]`: {args[0]} has the wrong format. It should be either of type `str` or type `list`"
            )
        inputs = self.tokenizer(*args, padding=padding, truncation=truncation, return_tensors=self.framework)
        # This is produced by tokenizers but is an invalid generate kwargs
        if "token_type_ids" in inputs:
            del inputs["token_type_ids"]
        return inputs
    def __call__(self, *args, **kwargs):
        result = super().__call__(*args, **kwargs)
        if (
            isinstance(args[0], list)
            and all(isinstance(el, str) for el in args[0])
            and all(len(res) == 1 for res in result)
        ):
            return [res[0] for res in result]
        return result

    def preprocess(self, inputs, truncation=TruncationStrategy.DO_NOT_TRUNCATE, **kwargs):
        inputs = self._parse_and_tokenize(inputs, truncation=truncation, **kwargs)
        return inputs

    def _forward(self, model_inputs, **generate_kwargs):
        if self.framework == "pt":
            in_b, input_length = model_inputs["input_ids"].shape
        elif self.framework == "tf":
            in_b, input_length = tf.shape(model_inputs["input_ids"]).numpy()

        generate_kwargs["min_length"] = generate_kwargs.get("min_length", self.model.config.min_length)
        generate_kwargs["max_length"] = generate_kwargs.get("max_length", self.model.config.max_length)
        self.check_inputs(input_length, generate_kwargs["min_length"], generate_kwargs["max_length"])
        output_ids = self.model.generate(**model_inputs, **generate_kwargs)
        out_b = output_ids.shape[0]
        # Reshape to (batch, num_return_sequences, sequence_length)
        if self.framework == "pt":
            output_ids = output_ids.reshape(in_b, out_b // in_b, *output_ids.shape[1:])
        elif self.framework == "tf":
            output_ids = tf.reshape(output_ids, (in_b, out_b // in_b, *output_ids.shape[1:]))
        return {"output_ids": output_ids}
    def postprocess(self, model_outputs, return_type=ReturnType.TEXT, clean_up_tokenization_spaces=False):
        records = []
        for output_ids in model_outputs["output_ids"][0]:
            if return_type == ReturnType.TENSORS:
                record = {f"{self.return_name}_token_ids": output_ids}
            elif return_type == ReturnType.TEXT:
                record = {
                    f"{self.return_name}_text": self.tokenizer.decode(
                        output_ids,
                        skip_special_tokens=True,
                        clean_up_tokenization_spaces=clean_up_tokenization_spaces,
                    )
                }
            records.append(record)
        return records
@add_end_docstrings(PIPELINE_INIT_ARGS)
class SummarizationPipeline(Text2TextGenerationPipeline):
    return_name = "summary"

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)

    def check_inputs(self, input_length: int, min_length: int, max_length: int) -> bool:
        if max_length < min_length:
            logger.warning(f"Your min_length={min_length} must be smaller than your max_length={max_length}.")

        if input_length < max_length:
            logger.warning(
                f"Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is "
                "a summarization task, where outputs shorter than the input are typically wanted, you might "
                f"consider decreasing max_length manually, e.g. summarizer('...', max_length={input_length // 2})"
            )
@add_end_docstrings(PIPELINE_INIT_ARGS)
class TranslationPipeline(Text2TextGenerationPipeline):
    return_name = "translation"

    def check_inputs(self, input_length: int, min_length: int, max_length: int):
        if input_length > 0.9 * max_length:
            logger.warning(
                f"Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider "
                "increasing your max_length manually, e.g. translator('...', max_length=400)"
            )
        return True

    def preprocess(self, *args, truncation=TruncationStrategy.DO_NOT_TRUNCATE, src_lang=None, tgt_lang=None):
        if getattr(self.tokenizer, "_build_translation_inputs", None):
            return self.tokenizer._build_translation_inputs(
                *args, return_tensors=self.framework, truncation=truncation, src_lang=src_lang, tgt_lang=tgt_lang
            )
        else:
            return super()._parse_and_tokenize(*args, truncation=truncation)

    def _sanitize_parameters(self, src_lang=None, tgt_lang=None, **kwargs):
        preprocess_params, forward_params, postprocess_params = super()._sanitize_parameters(**kwargs)
        if src_lang is not None:
            preprocess_params["src_lang"] = src_lang
        if tgt_lang is not None:
            preprocess_params["tgt_lang"] = tgt_lang
        if src_lang is None and tgt_lang is None:
            # Backward compatibility, direct arguments use is preferred.
            task = kwargs.get("task", self.task)
            items = task.split("_")
            if task and len(items) == 4:
                # translation, XX, to YY
                preprocess_params["src_lang"] = items[1]
                preprocess_params["tgt_lang"] = items[3]
        return preprocess_params, forward_params, postprocess_params

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)
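
# A minimal usage sketch for the pipeline classes above via the high-level
# `pipeline` factory. The checkpoint names are illustrative defaults; running
# this downloads model weights from the Hub.
from transformers import pipeline

generator = pipeline("text2text-generation", model="t5-small")
print(generator("translate English to German: The house is wonderful."))
# -> [{'generated_text': ...}]

translator = pipeline("translation_en_to_fr", model="t5-small")
print(translator("How old are you?", max_length=40))
# -> [{'translation_text': ...}]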
"""Sum the digits of num! (Project Euler problem 20 uses num = 100)."""


def factorial(num: int) -> int:
    """Compute the factorial of num iteratively."""
    fact = 1
    for i in range(1, num + 1):
        fact *= i
    return fact


def split_and_add(number: int) -> int:
    """Split the number into digits and add them."""
    sum_of_digits = 0
    while number > 0:
        last_digit = number % 10
        sum_of_digits += last_digit
        number = number // 10  # Removing the last_digit from the given number
    return sum_of_digits


def solution(num: int = 100) -> int:
    """Return the sum of the digits in num!."""
    nfact = factorial(num)
    result = split_and_add(nfact)
    return result


if __name__ == "__main__":
    print(solution(int(input("Enter the Number: ").strip())))
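
# For reference, the same computation via the standard library. This is an
# equivalent cross-check, not part of the original solution.
import math


def solution_oneliner(num: int = 100) -> int:
    # Sum the decimal digits of num! by round-tripping through str.
    return sum(int(digit) for digit in str(math.factorial(num)))


assert solution_oneliner(10) == 27  # 10! = 3628800 -> 3+6+2+8+8+0+0 = 27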
import argparse
import logging
import sys
from unittest.mock import patch
import run_glue_deebert
from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()


def get_setup_file():
    parser = argparse.ArgumentParser()
    parser.add_argument("-f")
    args = parser.parse_args()
    return args.f


class DeeBertTests(TestCasePlus):
    def setup(self) -> None:
        stream_handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(stream_handler)

    def run_and_check(self, args):
        n_gpu = get_gpu_count()

        if n_gpu > 1:
            pass
            # XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560
            # script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py"
            # distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split()
            # cmd = [sys.executable] + distributed_args + args
            # execute_subprocess_async(cmd, env=self.get_env())
            # XXX: test the results - need to save them first into .json file
        else:
            args.insert(0, "run_glue_deebert.py")
            with patch.object(sys, "argv", args):
                result = run_glue_deebert.main()
                for value in result.values():
                    self.assertGreaterEqual(value, 0.666)

    @slow
    @require_torch_non_multi_gpu
    def test_glue_deebert_train(self):
        train_args = """
            --model_type roberta
            --model_name_or_path roberta-base
            --task_name MRPC
            --do_train
            --do_eval
            --do_lower_case
            --data_dir ./tests/fixtures/tests_samples/MRPC/
            --max_seq_length 128
            --per_gpu_eval_batch_size=1
            --per_gpu_train_batch_size=8
            --learning_rate 2e-4
            --num_train_epochs 3
            --overwrite_output_dir
            --seed 42
            --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
            --plot_data_dir ./examples/deebert/results/
            --save_steps 0
            --overwrite_cache
            --eval_after_first_stage
            """.split()
        self.run_and_check(train_args)

        eval_args = """
            --model_type roberta
            --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
            --task_name MRPC
            --do_eval
            --do_lower_case
            --data_dir ./tests/fixtures/tests_samples/MRPC/
            --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
            --plot_data_dir ./examples/deebert/results/
            --max_seq_length 128
            --eval_each_highway
            --eval_highway
            --overwrite_cache
            --per_gpu_eval_batch_size=1
            """.split()
        self.run_and_check(eval_args)

        entropy_eval_args = """
            --model_type roberta
            --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
            --task_name MRPC
            --do_eval
            --do_lower_case
            --data_dir ./tests/fixtures/tests_samples/MRPC/
            --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
            --plot_data_dir ./examples/deebert/results/
            --max_seq_length 128
            --early_exit_entropy 0.1
            --eval_highway
            --overwrite_cache
            --per_gpu_eval_batch_size=1
            """.split()
        self.run_and_check(entropy_eval_args)
from typing import Any
class Node:
    def __init__(self, data: Any):
        self.data = data
        self.next = None

    def __repr__(self) -> str:
        return f"Node({self.data})"


class LinkedList:
    def __init__(self):
        self.head = None

    def __iter__(self) -> Any:
        node = self.head
        while node:
            yield node.data
            node = node.next

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __repr__(self) -> str:
        return "->".join([str(item) for item in self])
    def __getitem__(self, index: int) -> Any:
        if not 0 <= index < len(self):
            raise ValueError("list index out of range.")
        for i, node in enumerate(self):
            if i == index:
                return node
        return None

    def __setitem__(self, index: int, data: Any) -> None:
        if not 0 <= index < len(self):
            raise ValueError("list index out of range.")
        current = self.head
        for _ in range(index):
            current = current.next
        current.data = data
    def insert_tail(self, data: Any) -> None:
        self.insert_nth(len(self), data)

    def insert_head(self, data: Any) -> None:
        self.insert_nth(0, data)

    def insert_nth(self, index: int, data: Any) -> None:
        if not 0 <= index <= len(self):
            raise IndexError("list index out of range")
        new_node = Node(data)
        if self.head is None:
            self.head = new_node
        elif index == 0:
            new_node.next = self.head  # link new_node to head
            self.head = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node
    def print_list(self) -> None:  # print every node data
        print(self)

    def delete_head(self) -> Any:
        return self.delete_nth(0)

    def delete_tail(self) -> Any:  # delete from tail
        return self.delete_nth(len(self) - 1)

    def delete_nth(self, index: int = 0) -> Any:
        if not 0 <= index <= len(self) - 1:  # test if index is valid
            raise IndexError("List index out of range.")
        delete_node = self.head  # default first node
        if index == 0:
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
        return delete_node.data

    def is_empty(self) -> bool:
        return self.head is None
    def reverse(self) -> None:
        prev = None
        current = self.head

        while current:
            # Store the current node's next node.
            next_node = current.next
            # Make the current node's next point backwards
            current.next = prev
            # Make the previous node be the current node
            prev = current
            # Make the current node the next node (to progress iteration)
            current = next_node
        # prev now points at the old tail, which becomes the new head
        self.head = prev
def test_singly_linked_list() -> None:
    linked_list = LinkedList()
    assert linked_list.is_empty() is True
    assert str(linked_list) == ""

    try:
        linked_list.delete_head()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    try:
        linked_list.delete_tail()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    for i in range(10):
        assert len(linked_list) == i
        linked_list.insert_nth(i, i + 1)

    assert str(linked_list) == "->".join(str(i) for i in range(1, 11))

    linked_list.insert_head(0)
    linked_list.insert_tail(11)
    assert str(linked_list) == "->".join(str(i) for i in range(0, 12))

    assert linked_list.delete_head() == 0
    assert linked_list.delete_nth(9) == 10
    assert linked_list.delete_tail() == 11
    assert len(linked_list) == 9
    assert str(linked_list) == "->".join(str(i) for i in range(1, 10))

    assert all(linked_list[i] == i + 1 for i in range(0, 9)) is True

    for i in range(0, 9):
        linked_list[i] = -i
    assert all(linked_list[i] == -i for i in range(0, 9)) is True

    linked_list.reverse()
    assert str(linked_list) == "->".join(str(i) for i in range(-8, 1))
def test_singly_linked_list_2() -> None:
    """This section of the test uses varying data types for input."""
    test_input = [
        -9,
        100,
        Node(77345112),
        "dlrow olleH",
        7,
        5555,
        0,
        -192.55555,
        "Hello, world!",
        77.9,
        Node(10),
        None,
        None,
        12.20,
    ]
    linked_list = LinkedList()

    for i in test_input:
        linked_list.insert_tail(i)

    # Check if it's empty or not
    assert linked_list.is_empty() is False
    assert (
        str(linked_list) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
        "-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the head
    result = linked_list.delete_head()
    assert result == -9
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the tail
    result = linked_list.delete_tail()
    assert result == 12.2
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None"
    )

    # Delete a node in specific location in linked list
    result = linked_list.delete_nth(10)
    assert result is None
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None"
    )

    # Add a Node instance to its head
    linked_list.insert_head(Node("Hello again, world!"))
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
    )

    # Add None to its tail
    linked_list.insert_tail(None)
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
    )

    # Reverse the linked list
    linked_list.reverse()
    assert (
        str(linked_list)
        == "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
        "7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
    )


def main():
    from doctest import testmod

    testmod()

    linked_list = LinkedList()
    linked_list.insert_head(input("Inserting 1st at head ").strip())
    linked_list.insert_head(input("Inserting 2nd at head ").strip())
    print("\nPrint list:")
    linked_list.print_list()
    linked_list.insert_tail(input("\nInserting 1st at tail ").strip())
    linked_list.insert_tail(input("Inserting 2nd at tail ").strip())
    print("\nPrint list:")
    linked_list.print_list()
    print("\nDelete head")
    linked_list.delete_head()
    print("Delete tail")
    linked_list.delete_tail()
    print("\nPrint list:")
    linked_list.print_list()
    print("\nReverse linked list")
    linked_list.reverse()
    print("\nPrint list:")
    linked_list.print_list()
    print("\nString representation of linked list:")
    print(linked_list)
    print("\nReading/changing Node data using indexing:")
    print(f"Element at Position 1: {linked_list[1]}")
    linked_list[1] = input("Enter New Value: ").strip()
    print("New list:")
    print(linked_list)
    print(f"length of linked_list is : {len(linked_list)}")


if __name__ == "__main__":
    main()
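
# A compact smoke test of the API above; illustrative only, since the fuller
# behavior is already covered by the two test functions.
ll = LinkedList()
for value in (3, 1, 4):
    ll.insert_tail(value)
ll.insert_head(0)  # 0->3->1->4
ll.reverse()  # 4->1->3->0
assert str(ll) == "4->1->3->0"
assert ll.delete_head() == 4
assert ll.delete_tail() == 0
assert len(ll) == 2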
"""simple docstring"""
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
logger = logging.get_logger(__name__)


class MobileNetV2ImageProcessor(BaseImageProcessor):
    # Class name reconstructed from the defaults below (shortest_edge=256,
    # 224x224 center crop, ImageNet-standard normalization, and a semantic
    # segmentation post-processing step); treat it as a best-effort guess.
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = None,
        do_center_crop: Optional[bool] = None,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
    def post_process_semantic_segmentation(self, outputs, target_sizes: Optional[List[Tuple]] = None):
        logits = outputs.logits

        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits"
                )

            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()

            semantic_segmentation = []

            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
                )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]

        return semantic_segmentation
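
# An illustrative run of the processor above on a dummy image. The class name
# follows the reconstruction above; any BaseImageProcessor subclass behaves
# the same way, and the expected output shape comes from the 224x224 crop.
if __name__ == "__main__":
    from PIL import Image as _Image

    _image = _Image.fromarray(np.random.randint(0, 256, (480, 640, 3), dtype=np.uint8))
    _processor = MobileNetV2ImageProcessor()
    _batch = _processor(images=_image, return_tensors="np")
    print(_batch["pixel_values"].shape)  # (1, 3, 224, 224) after resize + center crop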
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeq2SeqLM, AutoTokenizer
from .base import PipelineTool


LANGUAGE_CODES = {
"Acehnese Arabic": "ace_Arab",
"Acehnese Latin": "ace_Latn",
"Mesopotamian Arabic": "acm_Arab",
"Ta'izzi-Adeni Arabic": "acq_Arab",
"Tunisian Arabic": "aeb_Arab",
"Afrikaans": "afr_Latn",
"South Levantine Arabic": "ajp_Arab",
"Akan": "aka_Latn",
"Amharic": "amh_Ethi",
"North Levantine Arabic": "apc_Arab",
"Modern Standard Arabic": "arb_Arab",
"Modern Standard Arabic Romanized": "arb_Latn",
"Najdi Arabic": "ars_Arab",
"Moroccan Arabic": "ary_Arab",
"Egyptian Arabic": "arz_Arab",
"Assamese": "asm_Beng",
"Asturian": "ast_Latn",
"Awadhi": "awa_Deva",
"Central Aymara": "ayr_Latn",
"South Azerbaijani": "azb_Arab",
"North Azerbaijani": "azj_Latn",
"Bashkir": "bak_Cyrl",
"Bambara": "bam_Latn",
"Balinese": "ban_Latn",
"Belarusian": "bel_Cyrl",
"Bemba": "bem_Latn",
"Bengali": "ben_Beng",
"Bhojpuri": "bho_Deva",
"Banjar Arabic": "bjn_Arab",
"Banjar Latin": "bjn_Latn",
"Standard Tibetan": "bod_Tibt",
"Bosnian": "bos_Latn",
"Buginese": "bug_Latn",
"Bulgarian": "bul_Cyrl",
"Catalan": "cat_Latn",
"Cebuano": "ceb_Latn",
"Czech": "ces_Latn",
"Chokwe": "cjk_Latn",
"Central Kurdish": "ckb_Arab",
"Crimean Tatar": "crh_Latn",
"Welsh": "cym_Latn",
"Danish": "dan_Latn",
"German": "deu_Latn",
"Southwestern Dinka": "dik_Latn",
"Dyula": "dyu_Latn",
"Dzongkha": "dzo_Tibt",
"Greek": "ell_Grek",
"English": "eng_Latn",
"Esperanto": "epo_Latn",
"Estonian": "est_Latn",
"Basque": "eus_Latn",
"Ewe": "ewe_Latn",
"Faroese": "fao_Latn",
"Fijian": "fij_Latn",
"Finnish": "fin_Latn",
"Fon": "fon_Latn",
"French": "fra_Latn",
"Friulian": "fur_Latn",
"Nigerian Fulfulde": "fuv_Latn",
"Scottish Gaelic": "gla_Latn",
"Irish": "gle_Latn",
"Galician": "glg_Latn",
"Guarani": "grn_Latn",
"Gujarati": "guj_Gujr",
"Haitian Creole": "hat_Latn",
"Hausa": "hau_Latn",
"Hebrew": "heb_Hebr",
"Hindi": "hin_Deva",
"Chhattisgarhi": "hne_Deva",
"Croatian": "hrv_Latn",
"Hungarian": "hun_Latn",
"Armenian": "hye_Armn",
"Igbo": "ibo_Latn",
"Ilocano": "ilo_Latn",
"Indonesian": "ind_Latn",
"Icelandic": "isl_Latn",
"Italian": "ita_Latn",
"Javanese": "jav_Latn",
"Japanese": "jpn_Jpan",
"Kabyle": "kab_Latn",
"Jingpho": "kac_Latn",
"Kamba": "kam_Latn",
"Kannada": "kan_Knda",
"Kashmiri Arabic": "kas_Arab",
"Kashmiri Devanagari": "kas_Deva",
"Georgian": "kat_Geor",
"Central Kanuri Arabic": "knc_Arab",
"Central Kanuri Latin": "knc_Latn",
"Kazakh": "kaz_Cyrl",
"Kabiyè": "kbp_Latn",
"Kabuverdianu": "kea_Latn",
"Khmer": "khm_Khmr",
"Kikuyu": "kik_Latn",
"Kinyarwanda": "kin_Latn",
"Kyrgyz": "kir_Cyrl",
"Kimbundu": "kmb_Latn",
"Northern Kurdish": "kmr_Latn",
"Kikongo": "kon_Latn",
"Korean": "kor_Hang",
"Lao": "lao_Laoo",
"Ligurian": "lij_Latn",
"Limburgish": "lim_Latn",
"Lingala": "lin_Latn",
"Lithuanian": "lit_Latn",
"Lombard": "lmo_Latn",
"Latgalian": "ltg_Latn",
"Luxembourgish": "ltz_Latn",
"Luba-Kasai": "lua_Latn",
"Ganda": "lug_Latn",
"Luo": "luo_Latn",
"Mizo": "lus_Latn",
"Standard Latvian": "lvs_Latn",
"Magahi": "mag_Deva",
"Maithili": "mai_Deva",
"Malayalam": "mal_Mlym",
"Marathi": "mar_Deva",
"Minangkabau Arabic ": "min_Arab",
"Minangkabau Latin": "min_Latn",
"Macedonian": "mkd_Cyrl",
"Plateau Malagasy": "plt_Latn",
"Maltese": "mlt_Latn",
"Meitei Bengali": "mni_Beng",
"Halh Mongolian": "khk_Cyrl",
"Mossi": "mos_Latn",
"Maori": "mri_Latn",
"Burmese": "mya_Mymr",
"Dutch": "nld_Latn",
"Norwegian Nynorsk": "nno_Latn",
"Norwegian Bokmål": "nob_Latn",
"Nepali": "npi_Deva",
"Northern Sotho": "nso_Latn",
"Nuer": "nus_Latn",
"Nyanja": "nya_Latn",
"Occitan": "oci_Latn",
"West Central Oromo": "gaz_Latn",
"Odia": "ory_Orya",
"Pangasinan": "pag_Latn",
"Eastern Panjabi": "pan_Guru",
"Papiamento": "pap_Latn",
"Western Persian": "pes_Arab",
"Polish": "pol_Latn",
"Portuguese": "por_Latn",
"Dari": "prs_Arab",
"Southern Pashto": "pbt_Arab",
"Ayacucho Quechua": "quy_Latn",
"Romanian": "ron_Latn",
"Rundi": "run_Latn",
"Russian": "rus_Cyrl",
"Sango": "sag_Latn",
"Sanskrit": "san_Deva",
"Santali": "sat_Olck",
"Sicilian": "scn_Latn",
"Shan": "shn_Mymr",
"Sinhala": "sin_Sinh",
"Slovak": "slk_Latn",
"Slovenian": "slv_Latn",
"Samoan": "smo_Latn",
"Shona": "sna_Latn",
"Sindhi": "snd_Arab",
"Somali": "som_Latn",
"Southern Sotho": "sot_Latn",
"Spanish": "spa_Latn",
"Tosk Albanian": "als_Latn",
"Sardinian": "srd_Latn",
"Serbian": "srp_Cyrl",
"Swati": "ssw_Latn",
"Sundanese": "sun_Latn",
"Swedish": "swe_Latn",
"Swahili": "swh_Latn",
"Silesian": "szl_Latn",
"Tamil": "tam_Taml",
"Tatar": "tat_Cyrl",
"Telugu": "tel_Telu",
"Tajik": "tgk_Cyrl",
"Tagalog": "tgl_Latn",
"Thai": "tha_Thai",
"Tigrinya": "tir_Ethi",
"Tamasheq Latin": "taq_Latn",
"Tamasheq Tifinagh": "taq_Tfng",
"Tok Pisin": "tpi_Latn",
"Tswana": "tsn_Latn",
"Tsonga": "tso_Latn",
"Turkmen": "tuk_Latn",
"Tumbuka": "tum_Latn",
"Turkish": "tur_Latn",
"Twi": "twi_Latn",
"Central Atlas Tamazight": "tzm_Tfng",
"Uyghur": "uig_Arab",
"Ukrainian": "ukr_Cyrl",
"Umbundu": "umb_Latn",
"Urdu": "urd_Arab",
"Northern Uzbek": "uzn_Latn",
"Venetian": "vec_Latn",
"Vietnamese": "vie_Latn",
"Waray": "war_Latn",
"Wolof": "wol_Latn",
"Xhosa": "xho_Latn",
"Eastern Yiddish": "ydd_Hebr",
"Yoruba": "yor_Latn",
"Yue Chinese": "yue_Hant",
"Chinese Simplified": "zho_Hans",
"Chinese Traditional": "zho_Hant",
"Standard Malay": "zsm_Latn",
"Zulu": "zul_Latn",
}
class TranslationTool(PipelineTool):
    default_checkpoint = "facebook/nllb-200-distilled-600M"
    description = (
        "This is a tool that translates text from a language to another. It takes three inputs: `text`, which should "
        "be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, "
        "which should be the language for the desired output language. Both `src_lang` and `tgt_lang` are written in "
        "plain English, such as 'Romanian', or 'Albanian'. It returns the text translated in `tgt_lang`."
    )
    name = "translator"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeq2SeqLM
    lang_to_code = LANGUAGE_CODES
    inputs = ["text", "text", "text"]
    outputs = ["text"]

    def encode(self, text, src_lang, tgt_lang):
        if src_lang not in self.lang_to_code:
            raise ValueError(f"{src_lang} is not a supported language.")
        if tgt_lang not in self.lang_to_code:
            raise ValueError(f"{tgt_lang} is not a supported language.")
        src_lang = self.lang_to_code[src_lang]
        tgt_lang = self.lang_to_code[tgt_lang]
        return self.pre_processor._build_translation_inputs(
            text, return_tensors="pt", src_lang=src_lang, tgt_lang=tgt_lang
        )

    def forward(self, inputs):
        return self.model.generate(**inputs)

    def decode(self, outputs):
        return self.post_processor.decode(outputs[0].tolist(), skip_special_tokens=True)
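
# Illustrative usage of the tool above. Instantiating TranslationTool loads
# the NLLB checkpoint (a sizeable download), so this is a sketch rather than
# a quick test. Language names must be keys of LANGUAGE_CODES.
if __name__ == "__main__":
    translator = TranslationTool()
    print(translator("How are you today?", src_lang="English", tgt_lang="French"))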
import shutil
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_tf_cross_test,
require_tf,
require_torch,
require_torchvision,
require_vision,
)
from transformers.utils import is_tf_available, is_torch_available, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, SamImageProcessor, SamProcessor
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
@require_vision
@require_torchvision
class SamProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = SamImageProcessor()
        processor = SamProcessor(image_processor)
        processor.save_pretrained(self.tmpdirname)

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Create a list with a single random PIL image."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = SamProcessor(image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)
        processor = SamProcessor.from_pretrained(self.tmpdirname, do_normalize=False, padding_value=1.0)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, SamImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        input_feat_extract.pop("original_sizes")  # pop original_sizes as it is popped in the processor
        input_feat_extract.pop("reshaped_input_sizes")  # pop reshaped_input_sizes as it is popped in the processor

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)
    @require_torch
    def test_post_process_masks(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)
        dummy_masks = [torch.ones((1, 3, 5, 5))]

        original_sizes = [[1764, 2646]]
        reshaped_input_size = [[683, 1024]]

        masks = processor.post_process_masks(dummy_masks, original_sizes, reshaped_input_size)
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))

        masks = processor.post_process_masks(
            dummy_masks, torch.tensor(original_sizes), torch.tensor(reshaped_input_size)
        )
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))

        # should also work with np
        dummy_masks = [np.ones((1, 3, 5, 5))]
        masks = processor.post_process_masks(dummy_masks, np.array(original_sizes), np.array(reshaped_input_size))
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))

        dummy_masks = [[1, 0], [0, 1]]
        with self.assertRaises(ValueError):
            masks = processor.post_process_masks(dummy_masks, np.array(original_sizes), np.array(reshaped_input_size))
@require_vision
@require_tf
class TFSamProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = SamImageProcessor()
        processor = SamProcessor(image_processor)
        processor.save_pretrained(self.tmpdirname)

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Create a list with a single random PIL image."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = SamProcessor(image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)
        processor = SamProcessor.from_pretrained(self.tmpdirname, do_normalize=False, padding_value=1.0)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, SamImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        input_feat_extract.pop("original_sizes")  # pop original_sizes as it is popped in the processor
        input_feat_extract.pop("reshaped_input_sizes")  # pop reshaped_input_sizes as it is popped in the processor

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    @require_tf
    def test_post_process_masks(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)
        dummy_masks = [tf.ones((1, 3, 5, 5))]

        original_sizes = [[1764, 2646]]
        reshaped_input_size = [[683, 1024]]

        masks = processor.post_process_masks(dummy_masks, original_sizes, reshaped_input_size, return_tensors="tf")
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))

        masks = processor.post_process_masks(
            dummy_masks,
            tf.convert_to_tensor(original_sizes),
            tf.convert_to_tensor(reshaped_input_size),
            return_tensors="tf",
        )
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))

        # should also work with np
        dummy_masks = [np.ones((1, 3, 5, 5))]
        masks = processor.post_process_masks(
            dummy_masks, np.array(original_sizes), np.array(reshaped_input_size), return_tensors="tf"
        )
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))

        dummy_masks = [[1, 0], [0, 1]]
        with self.assertRaises(tf.errors.InvalidArgumentError):
            masks = processor.post_process_masks(
                dummy_masks, np.array(original_sizes), np.array(reshaped_input_size), return_tensors="tf"
            )
@require_vision
@require_torchvision
class SamProcessorEquivalenceTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = SamImageProcessor()
        processor = SamProcessor(image_processor)
        processor.save_pretrained(self.tmpdirname)

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Create a list with a single random PIL image."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    @is_pt_tf_cross_test
    def test_post_process_masks_equivalence(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)
        dummy_masks = np.random.randint(0, 2, size=(1, 3, 5, 5)).astype(np.float32)
        tf_dummy_masks = [tf.convert_to_tensor(dummy_masks)]
        pt_dummy_masks = [torch.tensor(dummy_masks)]

        original_sizes = [[1764, 2646]]
        reshaped_input_size = [[683, 1024]]

        tf_masks = processor.post_process_masks(
            tf_dummy_masks, original_sizes, reshaped_input_size, return_tensors="tf"
        )
        pt_masks = processor.post_process_masks(
            pt_dummy_masks, original_sizes, reshaped_input_size, return_tensors="pt"
        )
        self.assertTrue(np.all(tf_masks[0].numpy() == pt_masks[0].numpy()))

    @is_pt_tf_cross_test
    def test_image_processor_equivalence(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        pt_input_feat_extract = image_processor(image_input, return_tensors="pt")["pixel_values"].numpy()
        pt_input_processor = processor(images=image_input, return_tensors="pt")["pixel_values"].numpy()

        tf_input_feat_extract = image_processor(image_input, return_tensors="tf")["pixel_values"].numpy()
        tf_input_processor = processor(images=image_input, return_tensors="tf")["pixel_values"].numpy()

        self.assertTrue(np.allclose(pt_input_feat_extract, pt_input_processor))
        self.assertTrue(np.allclose(pt_input_feat_extract, tf_input_feat_extract))
        self.assertTrue(np.allclose(pt_input_feat_extract, tf_input_processor))
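
# Outside unittest, the core call exercised above looks like this. Illustrative
# sketch only: SamProcessor and SamImageProcessor ship with transformers, and
# no model checkpoint is needed just to post-process masks.
if __name__ == "__main__":
    _processor = SamProcessor(SamImageProcessor())
    _masks = _processor.post_process_masks(
        [torch.ones((1, 3, 5, 5))], original_sizes=[[1764, 2646]], reshaped_input_sizes=[[683, 1024]]
    )
    print(_masks[0].shape)  # torch.Size([1, 3, 1764, 2646])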
def solution(length: int = 50) -> int:
    """
    Count the ways to fill a row `length` units long with red blocks of length
    at least three, separated by at least one black square (cf. Project Euler
    problem 114).
    """
    ways_number = [1] * (length + 1)

    for row_length in range(3, length + 1):
        for block_length in range(3, row_length + 1):
            for block_start in range(row_length - block_length):
                ways_number[row_length] += ways_number[
                    row_length - block_start - block_length - 1
                ]

            ways_number[row_length] += 1

    return ways_number[length]


if __name__ == "__main__":
    print(f"{solution() = }")
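
# A brute-force sanity check of the recurrence for small rows: enumerate every
# binary row (1 = red, 0 = black) and keep those whose red runs all have
# length >= 3. Illustrative only; exponential, so keep n small.
from itertools import product


def brute_force(length: int) -> int:
    count = 0
    for row in product((0, 1), repeat=length):
        runs = [len(r) for r in "".join(map(str, row)).split("0") if r]
        if all(run >= 3 for run in runs):
            count += 1
    return count


assert all(brute_force(n) == solution(n) for n in range(3, 12))
assert solution(7) == 17  # the worked example from the problem statement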