"""simple docstring"""
import os
import sys
A__ : List[Any] = os.path.join(os.path.dirname(__file__), 'src')
sys.path.append(SRC_DIR)
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoTokenizer,
add_start_docstrings,
)
dependencies = [
'torch',
'numpy',
'tokenizers',
'filelock',
'requests',
'tqdm',
'regex',
'sentencepiece',
'sacremoses',
'importlib_metadata',
'huggingface_hub',
]
@add_start_docstrings(AutoConfig.__doc__)
def config(*args, **kwargs):
    """Load a pretrained model configuration with AutoConfig."""
    return AutoConfig.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoTokenizer.__doc__)
def tokenizer(*args, **kwargs):
    """Load a pretrained tokenizer with AutoTokenizer."""
    return AutoTokenizer.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModel.__doc__)
def model(*args, **kwargs):
    """Load a pretrained base model with AutoModel."""
    return AutoModel.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForCausalLM.__doc__)
def modelForCausalLM(*args, **kwargs):
    """Load a pretrained causal language model."""
    return AutoModelForCausalLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForMaskedLM.__doc__)
def modelForMaskedLM(*args, **kwargs):
    """Load a pretrained masked language model."""
    return AutoModelForMaskedLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForSequenceClassification.__doc__)
def modelForSequenceClassification(*args, **kwargs):
    """Load a pretrained sequence classification model."""
    return AutoModelForSequenceClassification.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForQuestionAnswering.__doc__)
def modelForQuestionAnswering(*args, **kwargs):
    """Load a pretrained question answering model."""
    return AutoModelForQuestionAnswering.from_pretrained(*args, **kwargs)
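# Usage sketch (illustrative, not part of the file above): with a hubconf like
# this on a repo's default branch, the entry points become callable through
# torch.hub. The repo path and checkpoint name here are assumptions.
#
#   import torch
#
#   tokenizer = torch.hub.load("huggingface/transformers", "tokenizer", "bert-base-uncased")
#   model = torch.hub.load("huggingface/transformers", "model", "bert-base-uncased")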
import torch

from diffusers import DDPMParallelScheduler

from .test_schedulers import SchedulerCommonTest


class DDPMParallelSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDPMParallelScheduler,)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "variance_type": "fixed_small",
            "clip_sample": True,
        }

        config.update(**kwargs)
        return config
    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_variance_type(self):
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True,
                    prediction_type=prediction_type,
                    sample_max_value=threshold,
                )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [0, 500, 999]:
            self.check_over_forward(time_step=t)
    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.02)) < 1e-5
    def test_batch_step_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample1 = self.dummy_sample_deter
        sample2 = self.dummy_sample_deter + 0.1
        sample3 = self.dummy_sample_deter - 0.1

        per_sample_batch = sample1.shape[0]
        samples = torch.stack([sample1, sample2, sample3], dim=0)
        timesteps = torch.arange(num_trained_timesteps)[0:3, None].repeat(1, per_sample_batch)

        residual = model(samples.flatten(0, 1), timesteps.flatten(0, 1))
        pred_prev_sample = scheduler.batch_step_no_noise(residual, timesteps.flatten(0, 1), samples.flatten(0, 1))

        result_sum = torch.sum(torch.abs(pred_prev_sample))
        result_mean = torch.mean(torch.abs(pred_prev_sample))

        assert abs(result_sum.item() - 1153.1833) < 1e-2
        assert abs(result_mean.item() - 0.5005) < 1e-3
    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 258.9606) < 1e-2
        assert abs(result_mean.item() - 0.3372) < 1e-3
    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 202.0296) < 1e-2
        assert abs(result_mean.item() - 0.2631) < 1e-3
    def test_custom_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]
        scheduler.set_timesteps(timesteps=timesteps)

        scheduler_timesteps = scheduler.timesteps

        for i, timestep in enumerate(scheduler_timesteps):
            if i == len(scheduler_timesteps) - 1:
                expected_prev_t = -1
            else:
                expected_prev_t = timesteps[i + 1]

            prev_t = scheduler.previous_timestep(timestep)
            prev_t = prev_t.item()

            self.assertEqual(prev_t, expected_prev_t)
    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 51, 0]

        with self.assertRaises(ValueError, msg="`custom_timesteps` must be in descending order."):
            scheduler.set_timesteps(timesteps=timesteps)

    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]
        num_inference_steps = len(timesteps)

        with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `custom_timesteps`."):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)

    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [scheduler.config.num_train_timesteps]

        with self.assertRaises(
            ValueError,
            msg="`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}",
        ):
            scheduler.set_timesteps(timesteps=timesteps)
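# Usage sketch (illustrative, not part of the test file): driving the
# custom-timesteps API exercised above directly.
#
#   from diffusers import DDPMParallelScheduler
#
#   scheduler = DDPMParallelScheduler(num_train_timesteps=1000)
#   scheduler.set_timesteps(timesteps=[100, 87, 50, 1, 0])  # must be descending
#   print(scheduler.timesteps)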
"""simple docstring"""
import logging
import os
import threading
import time
try:
import warnings
except ImportError:
SCREAMING_SNAKE_CASE__:List[str] = None
try:
import msvcrt
except ImportError:
SCREAMING_SNAKE_CASE__:Any = None
try:
import fcntl
except ImportError:
SCREAMING_SNAKE_CASE__:Optional[int] = None
# Backward compatibility
# ------------------------------------------------
try:
TimeoutError
except NameError:
SCREAMING_SNAKE_CASE__:Tuple = OSError
# Data
# ------------------------------------------------
SCREAMING_SNAKE_CASE__:Optional[Any] = [
"""Timeout""",
"""BaseFileLock""",
"""WindowsFileLock""",
"""UnixFileLock""",
"""SoftFileLock""",
"""FileLock""",
]
SCREAMING_SNAKE_CASE__:List[Any] = """3.0.12"""
SCREAMING_SNAKE_CASE__:Optional[int] = None
def _lowerCamelCase( ):
global _logger
__a = _logger or logging.getLogger(__name__ )
return _logger
class Timeout(TimeoutError):
    """Raised when the lock could not be acquired in *timeout* seconds."""

    def __init__(self, lock_file):
        #: The path of the file lock.
        self.lock_file = lock_file
        return None

    def __str__(self):
        return f"The file lock '{self.lock_file}' could not be acquired."
class _Acquire_ReturnProxy:
    """Context-manager proxy returned by :meth:`BaseFileLock.acquire`."""

    def __init__(self, lock):
        self.lock = lock
        return None

    def __enter__(self):
        return self.lock

    def __exit__(self, exc_type, exc_value, traceback):
        self.lock.release()
        return None
class BaseFileLock:
    """Implements the base class of a file lock."""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        max_filename_length = max_filename_length if max_filename_length is not None else 255
        # Hash the filename if it's too long
        lock_file = self.hash_filename_if_too_long(lock_file, max_filename_length)

        # The path to the lock file.
        self._lock_file = lock_file

        # The file descriptor for the *_lock_file* as it is returned by the
        # os.open() function.
        # This file lock is only NOT None, if the object currently holds the
        # lock.
        self._lock_file_fd = None

        # The default timeout value.
        self._timeout = timeout

        # We use this lock primarily for the lock counter.
        self._thread_lock = threading.Lock()

        # The lock counter is used for implementing the nested locking
        # mechanism. Whenever the lock is acquired, the counter is increased and
        # the lock is only released, when this value is 0 again.
        self._lock_counter = 0
        return None

    @property
    def lock_file(self):
        return self._lock_file

    @property
    def timeout(self):
        return self._timeout

    @timeout.setter
    def timeout(self, value):
        self._timeout = float(value)
        return None

    def _acquire(self):
        raise NotImplementedError()

    def _release(self):
        raise NotImplementedError()

    @property
    def is_locked(self):
        return self._lock_file_fd is not None
    def acquire(self, timeout=None, poll_intervall=0.05):
        # Use the default timeout, if no timeout is provided.
        if timeout is None:
            timeout = self.timeout

        # Increment the number right at the beginning.
        # We can still undo it, if something fails.
        with self._thread_lock:
            self._lock_counter += 1

        lock_id = id(self)
        lock_filename = self._lock_file
        start_time = time.time()
        try:
            while True:
                with self._thread_lock:
                    if not self.is_locked:
                        logger().debug(f"Attempting to acquire lock {lock_id} on {lock_filename}")
                        self._acquire()

                if self.is_locked:
                    logger().debug(f"Lock {lock_id} acquired on {lock_filename}")
                    break
                elif timeout >= 0 and time.time() - start_time > timeout:
                    logger().debug(f"Timeout on acquiring lock {lock_id} on {lock_filename}")
                    raise Timeout(self._lock_file)
                else:
                    logger().debug(
                        f"Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ..."
                    )
                    time.sleep(poll_intervall)
        except:  # noqa
            # Something did go wrong, so decrement the counter.
            with self._thread_lock:
                self._lock_counter = max(0, self._lock_counter - 1)
            raise
        return _Acquire_ReturnProxy(lock=self)
    def release(self, force=False):
        with self._thread_lock:
            if self.is_locked:
                self._lock_counter -= 1

                if self._lock_counter == 0 or force:
                    lock_id = id(self)
                    lock_filename = self._lock_file

                    logger().debug(f"Attempting to release lock {lock_id} on {lock_filename}")
                    self._release()
                    self._lock_counter = 0
                    logger().debug(f"Lock {lock_id} released on {lock_filename}")
        return None

    def __enter__(self):
        self.acquire()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.release()
        return None

    def __del__(self):
        self.release(force=True)
        return None

    def hash_filename_if_too_long(self, path, max_length):
        filename = os.path.basename(path)
        if len(filename) > max_length and max_length > 0:
            dirname = os.path.dirname(path)
            hashed_filename = str(hash(filename))
            new_filename = filename[: max_length - len(hashed_filename) - 8] + "..." + hashed_filename + ".lock"
            return os.path.join(dirname, new_filename)
        else:
            return path
class WindowsFileLock(BaseFileLock):
    """Uses :func:`msvcrt.locking` to hard lock the lock file on Windows systems."""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        from .file_utils import relative_to_absolute_path

        super().__init__(lock_file, timeout=timeout, max_filename_length=max_filename_length)
        self._lock_file = "\\\\?\\" + relative_to_absolute_path(self.lock_file)

    def _acquire(self):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC

        try:
            fd = os.open(self._lock_file, open_mode)
        except OSError:
            pass
        else:
            try:
                msvcrt.locking(fd, msvcrt.LK_NBLCK, 1)
            except OSError:
                os.close(fd)
            else:
                self._lock_file_fd = fd
        return None

    def _release(self):
        fd = self._lock_file_fd
        self._lock_file_fd = None
        msvcrt.locking(fd, msvcrt.LK_UNLCK, 1)
        os.close(fd)

        try:
            os.remove(self._lock_file)
        # Probably another instance of the application
        # that acquired the file lock.
        except OSError:
            pass
        return None
class UnixFileLock(BaseFileLock):
    """Uses :func:`fcntl.flock` to hard lock the lock file on Unix systems."""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        max_filename_length = os.statvfs(os.path.dirname(lock_file)).f_namemax
        super().__init__(lock_file, timeout=timeout, max_filename_length=max_filename_length)

    def _acquire(self):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        fd = os.open(self._lock_file, open_mode)

        try:
            fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
        except OSError:
            os.close(fd)
        else:
            self._lock_file_fd = fd
        return None

    def _release(self):
        # Do not remove the lockfile:
        #
        # https://github.com/benediktschmitt/py-filelock/issues/31
        # https://stackoverflow.com/questions/17708885/flock-removing-locked-file-without-race-condition
        fd = self._lock_file_fd
        self._lock_file_fd = None
        fcntl.flock(fd, fcntl.LOCK_UN)
        os.close(fd)
        return None
class SoftFileLock(BaseFileLock):
    """Simply watches the existence of the lock file."""

    def _acquire(self):
        open_mode = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC
        try:
            fd = os.open(self._lock_file, open_mode)
        except OSError:
            pass
        else:
            self._lock_file_fd = fd
        return None

    def _release(self):
        os.close(self._lock_file_fd)
        self._lock_file_fd = None

        try:
            os.remove(self._lock_file)
        # The file is already deleted and that's what we want.
        except OSError:
            pass
        return None


FileLock = None

if msvcrt:
    FileLock = WindowsFileLock
elif fcntl:
    FileLock = UnixFileLock
else:
    FileLock = SoftFileLock

    if warnings is not None:
        warnings.warn("only soft file lock is available")
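# Usage sketch (illustrative, not part of the vendored module): FileLock resolves
# to the platform-appropriate class and is used as a re-entrant context manager.
#
#   lock = FileLock("resource.txt.lock", timeout=10)  # hypothetical lock path
#   with lock:
#       ...  # exclusive access to the guarded resource
#   assert not lock.is_locked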
"""Tokenization classes for the CPM model."""
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "TsinghuaAI/CPM-Generate": "https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model",
    }
}
class CpmTokenizer(PreTrainedTokenizer):
    """Runs jieba pre-tokenization followed by XLNet-style SentencePiece tokenization."""

    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=True,
        keep_accents=False,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        sep_token="<sep>",
        pad_token="<pad>",
        cls_token="<cls>",
        mask_token="<mask>",
        additional_special_tokens=["<eop>", "<eod>"],
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Mask token behaves like a normal word, i.e. includes the space before it.
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

        try:
            import jieba
        except ModuleNotFoundError as error:
            raise error.__class__(
                "You need to install jieba to use CpmTokenizer or CpmTokenizerFast. "
                "See https://pypi.org/project/jieba/ for installation."
            )
        self.jieba = jieba
        self.translator = str.maketrans(" \n", "\u2582\u2583")
    @property
    # Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size
    def vocab_size(self):
        return len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def preprocess_text(self, inputs):
        if self.remove_space:
            outputs = " ".join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace("``", '"').replace("''", '"')

        if not self.keep_accents:
            outputs = unicodedata.normalize("NFKD", outputs)
            outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()

        return outputs

    def _tokenize(self, text: str) -> List[str]:
        """Tokenize a string."""
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)

        return new_pieces
    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings) to a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls
    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is not None:
            return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1, 1]
        return ([0] * len(token_ids_0)) + [1, 1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls_segment_id = [2]

        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def _decode(self, *args, **kwargs):
        text = super()._decode(*args, **kwargs)
        text = text.replace(" ", "").replace("\u2582", " ").replace("\u2583", "\n")
        return text
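# Usage sketch (illustrative): loading the tokenizer from the checkpoint
# referenced in PRETRAINED_VOCAB_FILES_MAP above; requires the `jieba` package.
#
#   from transformers import CpmTokenizer
#
#   tokenizer = CpmTokenizer.from_pretrained("TsinghuaAI/CPM-Generate")
#   ids = tokenizer("清华大学").input_ids
#   print(tokenizer.decode(ids))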
import unittest
from queue import Empty
from threading import Thread

from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device

from ..test_modeling_common import ids_tensor


if is_torch_available():
    import torch

    from transformers import AutoModelForCausalLM


@require_torch
class StreamerTester(unittest.TestCase):
    def test_text_streamer_matches_non_streaming(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        greedy_text = tokenizer.decode(greedy_ids[0])

        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer)
            model.generate(input_ids, max_new_tokens=10, do_sample=False, streamer=streamer)
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]

        self.assertEqual(streamer_text, greedy_text)
    def test_iterator_streamer_matches_non_streaming(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        greedy_text = tokenizer.decode(greedy_ids[0])

        streamer = TextIteratorStreamer(tokenizer)
        generation_kwargs = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
        thread = Thread(target=model.generate, kwargs=generation_kwargs)
        thread.start()
        streamer_text = ""
        for new_text in streamer:
            streamer_text += new_text

        self.assertEqual(streamer_text, greedy_text)
    def test_text_streamer_skip_prompt(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        new_greedy_ids = greedy_ids[:, input_ids.shape[1] :]
        new_greedy_text = tokenizer.decode(new_greedy_ids[0])

        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer, skip_prompt=True)
            model.generate(input_ids, max_new_tokens=10, do_sample=False, streamer=streamer)
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]

        self.assertEqual(streamer_text, new_greedy_text)
    def test_text_streamer_decode_kwargs(self):
        # Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded.
        tokenizer = AutoTokenizer.from_pretrained("distilgpt2")
        model = AutoModelForCausalLM.from_pretrained("distilgpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = torch.ones((1, 5), device=torch_device).long() * model.config.bos_token_id
        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer, skip_special_tokens=True)
            model.generate(input_ids, max_new_tokens=1, do_sample=False, streamer=streamer)

        # The prompt contains a special token, so the streamer should not print it. As such, the output text, when
        # re-tokenized, must only contain one token
        streamer_text = cs.out[:-1]  # Remove the final "\n"
        streamer_text_tokenized = tokenizer(streamer_text, return_tensors="pt")
        self.assertEqual(streamer_text_tokenized.input_ids.shape, (1, 1))
    def test_iterator_streamer_timeout(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        streamer = TextIteratorStreamer(tokenizer, timeout=0.001)
        generation_kwargs = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
        thread = Thread(target=model.generate, kwargs=generation_kwargs)
        thread.start()

        # The streamer will timeout after 0.001 seconds, so an exception will be raised
        with self.assertRaises(Empty):
            streamer_text = ""
            for new_text in streamer:
                streamer_text += new_text
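# Usage sketch (illustrative, not part of the test file): the TextIteratorStreamer
# pattern exercised above, outside a test harness.
#
#   streamer = TextIteratorStreamer(tokenizer)
#   thread = Thread(target=model.generate, kwargs={"input_ids": input_ids, "streamer": streamer})
#   thread.start()
#   for chunk in streamer:  # yields decoded text as soon as it is generated
#       print(chunk, end="")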
"""MSE - Mean Squared Error Metric."""
from sklearn.metrics import mean_squared_error

import datasets


_CITATION = """\
@article{scikit-learn,
  title={Scikit-learn: Machine Learning in {P}ython},
  author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
         and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
         and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
         Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
  journal={Journal of Machine Learning Research},
  volume={12},
  pages={2825--2830},
  year={2011}
}
"""

_DESCRIPTION = """\
Mean Squared Error(MSE) is the average of the square of difference between the predicted
and actual values.
"""

_KWARGS_DESCRIPTION = """
Args:
    predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)
        Estimated target values.
    references: array-like of shape (n_samples,) or (n_samples, n_outputs)
        Ground truth (correct) target values.
    sample_weight: array-like of shape (n_samples,), default=None
        Sample weights.
    multioutput: {"raw_values", "uniform_average"} or array-like of shape (n_outputs,), default="uniform_average"
        Defines aggregating of multiple output values. Array-like value defines weights used to average errors.

        "raw_values" : Returns a full set of errors in case of multioutput input.

        "uniform_average" : Errors of all outputs are averaged with uniform weight.

    squared : bool, default=True
        If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.

Returns:
    mse : mean squared error.
Examples:

    >>> mse_metric = datasets.load_metric("mse")
    >>> predictions = [2.5, 0.0, 2, 8]
    >>> references = [3, -0.5, 2, 7]
    >>> results = mse_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'mse': 0.375}
    >>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)
    >>> print(rmse_result)
    {'mse': 0.6123724356957945}

    If you're using multi-dimensional lists, then set the config as follows :

    >>> mse_metric = datasets.load_metric("mse", "multilist")
    >>> predictions = [[0.5, 1], [-1, 1], [7, -6]]
    >>> references = [[0, 2], [-1, 2], [8, -5]]
    >>> results = mse_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'mse': 0.7083333333333334}
    >>> results = mse_metric.compute(predictions=predictions, references=references, multioutput='raw_values')
    >>> print(results) # doctest: +NORMALIZE_WHITESPACE
    {'mse': array([0.41666667, 1. ])}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Mse(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(self._get_feature_types()),
            reference_urls=[
                "https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html"
            ],
        )

    def _get_feature_types(self):
        if self.config_name == "multilist":
            return {
                "predictions": datasets.Sequence(datasets.Value("float")),
                "references": datasets.Sequence(datasets.Value("float")),
            }
        else:
            return {
                "predictions": datasets.Value("float"),
                "references": datasets.Value("float"),
            }

    def _compute(self, predictions, references, sample_weight=None, multioutput="uniform_average", squared=True):
        mse = mean_squared_error(
            references, predictions, sample_weight=sample_weight, multioutput=multioutput, squared=squared
        )

        return {"mse": mse}
"""simple docstring"""
import math
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_UpperCamelCase = logging.get_logger(__name__)
_UpperCamelCase = {
"""facebook/data2vec-base-960h""": """https://huggingface.co/facebook/data2vec-audio-base-960h/resolve/main/config.json""",
# See all Data2VecAudio models at https://huggingface.co/models?filter=data2vec-audio
}
class Data2VecAudioConfig(PretrainedConfig):
    model_type = "data2vec-audio"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embedding_groups=16,
        conv_pos_kernel_size=19,
        num_conv_pos_embeddings=5,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        ctc_loss_reduction="sum",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1500),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=512,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        add_adapter=False,
        adapter_kernel_size=3,
        adapter_stride=2,
        num_adapter_layers=3,
        output_hidden_size=None,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.conv_pos_kernel_size = conv_pos_kernel_size
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.use_weighted_layer_sum = use_weighted_layer_sum

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def inputs_to_logits_ratio(self):
        return math.prod(self.conv_stride)
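# Usage sketch (illustrative): the default configuration and its derived
# feature-extractor downsampling ratio.
#
#   config = Data2VecAudioConfig()
#   print(config.inputs_to_logits_ratio)  # prod((5, 2, 2, 2, 2, 2, 2)) == 320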
def different_signs(num_1: int, num_2: int) -> bool:
    """
    Return True if the two integers have opposite signs, using XOR: the sign bit
    of num_1 ^ num_2 is set exactly when the sign bits of the operands differ.

    >>> different_signs(1, -1)
    True
    >>> different_signs(1, 1)
    False
    """
    return num_1 ^ num_2 < 0


if __name__ == "__main__":
    import doctest

    doctest.testmod()
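if __name__ == "__main__":
    # Sign-bit sketch (illustrative addition): in two's complement, num_1 ^ num_2
    # is negative exactly when the two sign bits differ, so no branching is needed.
    assert different_signs(10, -3) is True
    assert different_signs(-10, -3) is False
    assert different_signs(0, 5) is False  # zero counts as non-negative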
import unittest

import numpy as np
import torch

from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad


class SummarizationDataProcessingTest(unittest.TestCase):
    def setUp(self):
        self.block_size = 10

    def test_fit_to_block_sequence_too_small(self):
        """Pad the sequence with 0 if the sequence is smaller than the block size."""
        sequence = [1, 2, 3, 4]
        expected_output = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_fit_to_block_sequence_fit_exactly(self):
        """Do nothing if the sequence is the right size."""
        sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_fit_to_block_sequence_too_big(self):
        """Truncate the sequence if it is too long."""
        sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
        expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)
    def test_process_story_no_highlights(self):
        """Processing a story with no highlights returns an empty list for the summary."""
        raw_story = (
            "It was the year of Our Lord one thousand seven hundred and\n"
            " seventy-five.\n\nSpiritual revelations were conceded to England at that\n"
            " favoured period, as at this."
        )
        _, summary_lines = process_story(raw_story)
        self.assertEqual(summary_lines, [])

    def test_process_empty_story(self):
        """An empty story returns an empty list for both the story and the summary."""
        raw_story = ""
        story_lines, summary_lines = process_story(raw_story)
        self.assertEqual(story_lines, [])
        self.assertEqual(summary_lines, [])

    def test_process_story_with_missing_period(self):
        raw_story = (
            "It was the year of Our Lord one thousand seven hundred and "
            "seventy-five\n\nSpiritual revelations were conceded to England "
            "at that favoured period, as at this.\n@highlight\n\nIt was the best of times"
        )
        story_lines, summary_lines = process_story(raw_story)

        expected_story_lines = [
            "It was the year of Our Lord one thousand seven hundred and seventy-five.",
            "Spiritual revelations were conceded to England at that favoured period, as at this.",
        ]
        self.assertEqual(expected_story_lines, story_lines)

        expected_summary_lines = ["It was the best of times."]
        self.assertEqual(expected_summary_lines, summary_lines)
    def test_build_mask_no_padding(self):
        sequence = torch.tensor([1, 2, 3, 4])
        expected = torch.tensor([1, 1, 1, 1])
        np.testing.assert_array_equal(build_mask(sequence, 0).numpy(), expected.numpy())

    def test_build_mask(self):
        sequence = torch.tensor([1, 2, 3, 4, 23, 23, 23])
        expected = torch.tensor([1, 1, 1, 1, 0, 0, 0])
        np.testing.assert_array_equal(build_mask(sequence, 23).numpy(), expected.numpy())

    def test_build_mask_with_padding_equal_to_one(self):
        sequence = torch.tensor([8, 2, 3, 4, 1, 1, 1])
        expected = torch.tensor([1, 1, 1, 1, 0, 0, 0])
        np.testing.assert_array_equal(build_mask(sequence, 1).numpy(), expected.numpy())

    def test_compute_token_type_ids(self):
        separator = 101
        batch = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]])
        expected = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]])

        result = compute_token_type_ids(batch, separator)
        np.testing.assert_array_equal(result, expected)
from __future__ import annotations


def p_series(nth_term: int | float | str, power: int | float | str) -> list[str]:
    """
    Return the P-Series 1 + 1/2^p + 1/3^p + ... + 1/n^p as a list of strings.

    >>> p_series(5, 2)
    ['1', '1 / 4', '1 / 9', '1 / 16', '1 / 25']
    >>> p_series("", 1000)
    ['']
    """
    if nth_term == "":
        return [""]
    nth_term = int(nth_term)
    power = int(power)
    series: list[str] = []
    for temp in range(int(nth_term)):
        series.append(f"1 / {pow(temp + 1, int(power))}" if series else "1")
    return series


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    nth_term = int(input("Enter the last number (nth term) of the P-Series"))
    power = int(input("Enter the power for P-Series"))
    print("Formula of P-Series => 1+1/2^p+1/3^p ..... 1/n^p")
    print(p_series(nth_term, power))
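if __name__ == "__main__":
    # Worked example (illustrative addition, not in the original file): for
    # n=4, p=3 the series is 1 + 1/2^3 + 1/3^3 + 1/4^3, rendered term by term.
    assert p_series(4, 3) == ["1", "1 / 8", "1 / 27", "1 / 64"]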
def base16_encode(data: bytes) -> str:
    """
    Encode raw bytes as an uppercase base16 (hex) string.

    >>> base16_encode(b'Hello World!')
    '48656C6C6F20576F726C6421'
    """
    # Turn each byte into its two-digit, uppercase hexadecimal representation.
    return "".join([hex(byte)[2:].zfill(2).upper() for byte in list(data)])


def base16_decode(data: str) -> bytes:
    """
    Decode an uppercase base16 (hex) string back into bytes.

    >>> base16_decode('48656C6C6F20576F726C6421')
    b'Hello World!'
    """
    # Check the data is valid base16: an even number of hex digits ...
    if (len(data) % 2) != 0:
        raise ValueError(
            """Base16 encoded data is invalid:
Data does not have an even number of hex digits."""
        )
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(data) <= set("0123456789ABCDEF"):
        raise ValueError(
            """Base16 encoded data is invalid:
Data is not uppercase hex or it contains invalid characters."""
        )
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1], 16) for i in range(0, len(data), 2))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
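if __name__ == "__main__":
    # Round-trip sketch (illustrative addition): encoding then decoding returns
    # the original bytes, including non-ASCII values.
    payload = b"\x00\xffdata"
    assert base16_decode(base16_encode(payload)) == payload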
# Dummy placeholder objects for torch-backed classes: importing the library
# without torch installed then fails lazily, at object construction time,
# instead of at import time.
from ..utils import DummyObject, requires_backends


# One dummy is defined per torch-backed class, each with exactly the shape
# below. The concrete class names were stripped from this dump and are not
# recoverable, so `TorchBackedClass` is a hypothetical placeholder standing in
# for every one of them; the module repeats this block once per public class.
# The `from_config`/`from_pretrained` classmethod names are an assumption based
# on the diffusers-style dummy-object pattern.
class TorchBackedClass(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


# Torch-backed module-level functions follow the same pattern; the dump
# contained seven of them, with names likewise stripped, so
# `torch_backed_function` is a hypothetical placeholder standing in for each.
def torch_backed_function(*args, **kwargs):
    requires_backends(torch_backed_function, ["torch"])
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[Any]:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : List[Any] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[Any]:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[Any]:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Dict:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : List[Any] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Union[str, Any]:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> str:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Union[str, Any]:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : List[str] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[int]:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Union[str, Any]:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : Tuple = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Dict:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Tuple:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : Union[str, Any] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> str:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Any:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Union[str, Any]:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : str = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[int]:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Tuple:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> str:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : Union[str, Any] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Union[str, Any]:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> str:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Dict:
requires_backends(cls , ["torch"] )
| 18 | 0 |
"""simple docstring"""
from __future__ import annotations
from decimal import Decimal
from math import * # noqa: F403
from sympy import diff
def newton_raphson(func: str, a: float | Decimal, precision: float = 10**-10) -> float:
    """Find a real root of ``func`` (an expression in ``x``, given as a string) via Newton-Raphson, starting from ``a``."""
    x = a
    while True:
        # Newton-Raphson update: x_{n+1} = x_n - f(x_n) / f'(x_n)
        x = Decimal(x) - (
            Decimal(eval(func)) / Decimal(eval(str(diff(func))))  # noqa: S307
        )
        # This number dictates the accuracy of the answer
        if abs(eval(func)) < precision:  # noqa: S307
            return float(x)
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(F'''The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}''')
# Find root of polynomial
print(F'''The root of x**2 - 5*x + 2 = 0 is {newton_raphson("x**2 - 5*x + 2", 0.4)}''')
    # Find the value of e (the root of log(x) - 1 = 0)
print(F'''The root of log(x) - 1 = 0 is {newton_raphson("log(x) - 1", 2)}''')
# Exponential Roots
print(F'''The root of exp(x) - 1 = 0 is {newton_raphson("exp(x) - 1", 0)}''')
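A minimal eval-free sketch of the same update rule, compiling f and f' once with sympy.lambdify instead of re-running eval on every iteration (the helper name and tolerance below are illustrative, not part of the script above):

from sympy import diff, lambdify, sympify
from sympy.abc import x as sym_x


def newton_raphson_sketch(expr: str, x0: float, precision: float = 1e-10) -> float:
    # Compile f and f' to plain Python callables up front.
    f_expr = sympify(expr)
    f = lambdify(sym_x, f_expr)
    f_prime = lambdify(sym_x, diff(f_expr, sym_x))
    while abs(f(x0)) >= precision:
        x0 = x0 - f(x0) / f_prime(x0)  # x_{n+1} = x_n - f(x_n) / f'(x_n)
    return float(x0)


print(newton_raphson_sketch("x**2 - 5*x + 2", 0.4))  # one root of the quadratic above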
| 633 |
'''simple docstring'''
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
def __a(SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : Tuple=None ):
'''simple docstring'''
_lowerCAmelCase = None
if token is not None:
_lowerCAmelCase = {"Accept": "application/vnd.github+json", "Authorization": F'''Bearer {token}'''}
_lowerCAmelCase = F'''https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100'''
_lowerCAmelCase = requests.get(SCREAMING_SNAKE_CASE_ , headers=SCREAMING_SNAKE_CASE_ ).json()
_lowerCAmelCase = {}
try:
job_links.update({job["name"]: job["html_url"] for job in result["jobs"]} )
_lowerCAmelCase = math.ceil((result["total_count"] - 100) / 100 )
for i in range(SCREAMING_SNAKE_CASE_ ):
_lowerCAmelCase = requests.get(url + F'''&page={i + 2}''' , headers=SCREAMING_SNAKE_CASE_ ).json()
job_links.update({job["name"]: job["html_url"] for job in result["jobs"]} )
return job_links
except Exception:
print(F'''Unknown error, could not fetch links:\n{traceback.format_exc()}''' )
return {}
def __a(SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Dict=None ):
'''simple docstring'''
_lowerCAmelCase = None
if token is not None:
_lowerCAmelCase = {"Accept": "application/vnd.github+json", "Authorization": F'''Bearer {token}'''}
    _lowerCAmelCase = F'''https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/artifacts?per_page=100'''
_lowerCAmelCase = requests.get(SCREAMING_SNAKE_CASE_ , headers=SCREAMING_SNAKE_CASE_ ).json()
_lowerCAmelCase = {}
try:
artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]} )
_lowerCAmelCase = math.ceil((result["total_count"] - 100) / 100 )
for i in range(SCREAMING_SNAKE_CASE_ ):
_lowerCAmelCase = requests.get(url + F'''&page={i + 2}''' , headers=SCREAMING_SNAKE_CASE_ ).json()
artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]} )
return artifacts
except Exception:
print(F'''Unknown error, could not fetch links:\n{traceback.format_exc()}''' )
return {}
def __a(SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : Dict ):
'''simple docstring'''
_lowerCAmelCase = None
if token is not None:
_lowerCAmelCase = {"Accept": "application/vnd.github+json", "Authorization": F'''Bearer {token}'''}
_lowerCAmelCase = requests.get(SCREAMING_SNAKE_CASE_ , headers=SCREAMING_SNAKE_CASE_ , allow_redirects=SCREAMING_SNAKE_CASE_ )
_lowerCAmelCase = result.headers["Location"]
_lowerCAmelCase = requests.get(SCREAMING_SNAKE_CASE_ , allow_redirects=SCREAMING_SNAKE_CASE_ )
_lowerCAmelCase = os.path.join(SCREAMING_SNAKE_CASE_ , F'''{artifact_name}.zip''' )
with open(SCREAMING_SNAKE_CASE_ , "wb" ) as fp:
fp.write(response.content )
def __a(SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Optional[Any]=None ):
'''simple docstring'''
_lowerCAmelCase = []
_lowerCAmelCase = []
_lowerCAmelCase = None
with zipfile.ZipFile(SCREAMING_SNAKE_CASE_ ) as z:
for filename in z.namelist():
if not os.path.isdir(SCREAMING_SNAKE_CASE_ ):
# read the file
if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
with z.open(SCREAMING_SNAKE_CASE_ ) as f:
for line in f:
_lowerCAmelCase = line.decode("UTF-8" ).strip()
if filename == "failures_line.txt":
try:
# `error_line` is the place where `error` occurs
_lowerCAmelCase = line[: line.index(": " )]
_lowerCAmelCase = line[line.index(": " ) + len(": " ) :]
errors.append([error_line, error] )
except Exception:
# skip un-related lines
pass
elif filename == "summary_short.txt" and line.startswith("FAILED " ):
# `test` is the test method that failed
_lowerCAmelCase = line[len("FAILED " ) :]
failed_tests.append(SCREAMING_SNAKE_CASE_ )
elif filename == "job_name.txt":
_lowerCAmelCase = line
if len(SCREAMING_SNAKE_CASE_ ) != len(SCREAMING_SNAKE_CASE_ ):
raise ValueError(
F'''`errors` and `failed_tests` should have the same number of elements. Got {len(SCREAMING_SNAKE_CASE_ )} for `errors` '''
F'''and {len(SCREAMING_SNAKE_CASE_ )} for `failed_tests` instead. The test reports in {artifact_zip_path} have some'''
" problem." )
_lowerCAmelCase = None
if job_name and job_links:
_lowerCAmelCase = job_links.get(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# A list with elements of the form (line of error, error, failed test)
_lowerCAmelCase = [x + [y] + [job_link] for x, y in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )]
return result
def __a(SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Tuple=None ):
'''simple docstring'''
_lowerCAmelCase = []
_lowerCAmelCase = [os.path.join(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) for p in os.listdir(SCREAMING_SNAKE_CASE_ ) if p.endswith(".zip" )]
for p in paths:
errors.extend(get_errors_from_single_artifact(SCREAMING_SNAKE_CASE_ , job_links=SCREAMING_SNAKE_CASE_ ) )
return errors
def __a(SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : str=None ):
'''simple docstring'''
_lowerCAmelCase = Counter()
counter.update([x[1] for x in logs] )
_lowerCAmelCase = counter.most_common()
_lowerCAmelCase = {}
for error, count in counts:
if error_filter is None or error not in error_filter:
_lowerCAmelCase = {"count": count, "failed_tests": [(x[2], x[0]) for x in logs if x[1] == error]}
_lowerCAmelCase = dict(sorted(r.items() , key=lambda SCREAMING_SNAKE_CASE_ : item[1]["count"] , reverse=SCREAMING_SNAKE_CASE_ ) )
return r
def __a(SCREAMING_SNAKE_CASE_ : List[str] ):
'''simple docstring'''
_lowerCAmelCase = test.split("::" )[0]
if test.startswith("tests/models/" ):
_lowerCAmelCase = test.split("/" )[2]
else:
_lowerCAmelCase = None
return test
def __a(SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Tuple=None ):
'''simple docstring'''
_lowerCAmelCase = [(x[0], x[1], get_model(x[2] )) for x in logs]
_lowerCAmelCase = [x for x in logs if x[2] is not None]
_lowerCAmelCase = {x[2] for x in logs}
_lowerCAmelCase = {}
for test in tests:
_lowerCAmelCase = Counter()
# count by errors in `test`
counter.update([x[1] for x in logs if x[2] == test] )
_lowerCAmelCase = counter.most_common()
_lowerCAmelCase = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
_lowerCAmelCase = sum(error_counts.values() )
if n_errors > 0:
_lowerCAmelCase = {"count": n_errors, "errors": error_counts}
_lowerCAmelCase = dict(sorted(r.items() , key=lambda SCREAMING_SNAKE_CASE_ : item[1]["count"] , reverse=SCREAMING_SNAKE_CASE_ ) )
return r
def __a(SCREAMING_SNAKE_CASE_ : Optional[int] ):
'''simple docstring'''
_lowerCAmelCase = "| no. | error | status |"
_lowerCAmelCase = "|-:|:-|:-|"
_lowerCAmelCase = [header, sep]
for error in reduced_by_error:
_lowerCAmelCase = reduced_by_error[error]["count"]
_lowerCAmelCase = F'''| {count} | {error[:100]} | |'''
lines.append(SCREAMING_SNAKE_CASE_ )
return "\n".join(SCREAMING_SNAKE_CASE_ )
def __a(SCREAMING_SNAKE_CASE_ : Optional[int] ):
'''simple docstring'''
_lowerCAmelCase = "| model | no. of errors | major error | count |"
_lowerCAmelCase = "|-:|-:|-:|-:|"
_lowerCAmelCase = [header, sep]
for model in reduced_by_model:
_lowerCAmelCase = reduced_by_model[model]["count"]
_lowerCAmelCase , _lowerCAmelCase = list(reduced_by_model[model]["errors"].items() )[0]
_lowerCAmelCase = F'''| {model} | {count} | {error[:60]} | {_count} |'''
lines.append(SCREAMING_SNAKE_CASE_ )
return "\n".join(SCREAMING_SNAKE_CASE_ )
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
parser.add_argument(
"--output_dir",
type=str,
required=True,
help="Where to store the downloaded artifacts and other result files.",
)
parser.add_argument("--token", default=None, type=str, help="A token that has actions:read permission.")
_SCREAMING_SNAKE_CASE = parser.parse_args()
os.makedirs(args.output_dir, exist_ok=True)
_SCREAMING_SNAKE_CASE = get_job_links(args.workflow_run_id, token=args.token)
_SCREAMING_SNAKE_CASE = {}
# To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
# For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
if _job_links:
for k, v in _job_links.items():
# This is how GitHub actions combine job names.
if " / " in k:
_SCREAMING_SNAKE_CASE = k.find(" / ")
_SCREAMING_SNAKE_CASE = k[index + len(" / ") :]
_SCREAMING_SNAKE_CASE = v
with open(os.path.join(args.output_dir, "job_links.json"), "w", encoding="UTF-8") as fp:
json.dump(job_links, fp, ensure_ascii=False, indent=4)
_SCREAMING_SNAKE_CASE = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, "artifacts.json"), "w", encoding="UTF-8") as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
for idx, (name, url) in enumerate(artifacts.items()):
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
_SCREAMING_SNAKE_CASE = get_all_errors(args.output_dir, job_links=job_links)
# `e[1]` is the error
_SCREAMING_SNAKE_CASE = Counter()
counter.update([e[1] for e in errors])
# print the top 30 most common test errors
_SCREAMING_SNAKE_CASE = counter.most_common(30)
for item in most_common:
print(item)
with open(os.path.join(args.output_dir, "errors.json"), "w", encoding="UTF-8") as fp:
json.dump(errors, fp, ensure_ascii=False, indent=4)
_SCREAMING_SNAKE_CASE = reduce_by_error(errors)
_SCREAMING_SNAKE_CASE = reduce_by_model(errors)
_SCREAMING_SNAKE_CASE = make_github_table(reduced_by_error)
_SCREAMING_SNAKE_CASE = make_github_table_per_model(reduced_by_model)
with open(os.path.join(args.output_dir, "reduced_by_error.txt"), "w", encoding="UTF-8") as fp:
fp.write(sa)
with open(os.path.join(args.output_dir, "reduced_by_model.txt"), "w", encoding="UTF-8") as fp:
fp.write(sa)
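Both fetchers above use the same pagination recipe: request the first page of 100 items, read `total_count`, compute `math.ceil((total_count - 100) / 100)` remaining pages, and walk them. A generic hedged sketch of that recipe (the `list_key` parameter and helper name are illustrative, not part of the script above):

import math

import requests


def fetch_all_pages(url: str, list_key: str, token=None) -> list:
    # GitHub-style pagination: `url` already carries `per_page=100`,
    # and the first response reports the grand total in `total_count`.
    headers = {"Accept": "application/vnd.github+json"}
    if token is not None:
        headers["Authorization"] = f"Bearer {token}"
    result = requests.get(url, headers=headers).json()
    items = list(result[list_key])
    pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)
    for i in range(pages_to_iterate_over):
        result = requests.get(url + f"&page={i + 2}", headers=headers).json()
        items.extend(result[list_key])
    return items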
| 18 | 0 |
'''simple docstring'''
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def test_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("""test""")
    else:
        parser = argparse.ArgumentParser("""Accelerate test command""")
    parser.add_argument(
        """--config_file""",
        default=None,
        help=(
            """The path to use to store the config file. Will default to a file named default_config.yaml in the cache """
            """location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have """
            """such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed """
            """with 'huggingface'."""
        ),
    )
    if subparsers is not None:
        parser.set_defaults(func=test_command)
    return parser


def test_command(args):
    script_name = os.path.sep.join(__file__.split(os.path.sep)[:-2] + ["""test_utils""", """scripts""", """test_script.py"""])
    if args.config_file is None:
        test_args = script_name
    else:
        test_args = F"--config_file={args.config_file} {script_name}"
    cmd = ["""accelerate-launch"""] + test_args.split()
    result = execute_subprocess_async(cmd, env=os.environ.copy())
    if result.returncode == 0:
        print("""Test is a success! You are ready for your distributed training!""")


def main():
    parser = test_command_parser()
    args = parser.parse_args()
    test_command(args)
if __name__ == "__main__":
main()
| 694 |
'''simple docstring'''
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class lowerCAmelCase_ ( __magic_name__ ):
__lowerCamelCase : List[str] = (DPMSolverSinglestepScheduler,)
__lowerCamelCase : int = (("num_inference_steps", 25),)
def _snake_case ( self , **_lowerCAmelCase ) -> Any:
_lowerCAmelCase = {
"num_train_timesteps": 1000,
"beta_start": 0.0001,
"beta_end": 0.02,
"beta_schedule": "linear",
"solver_order": 2,
"prediction_type": "epsilon",
"thresholding": False,
"sample_max_value": 1.0,
"algorithm_type": "dpmsolver++",
"solver_type": "midpoint",
"lambda_min_clipped": -float("inf" ),
"variance_type": None,
}
config.update(**_lowerCAmelCase )
return config
def _snake_case ( self , _lowerCAmelCase=0 , **_lowerCAmelCase ) -> List[Any]:
_lowerCAmelCase = dict(self.forward_default_kwargs )
_lowerCAmelCase = kwargs.pop("num_inference_steps" , _lowerCAmelCase )
_lowerCAmelCase = self.dummy_sample
_lowerCAmelCase = 0.1 * sample
_lowerCAmelCase = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
_lowerCAmelCase = self.get_scheduler_config(**_lowerCAmelCase )
_lowerCAmelCase = scheduler_class(**_lowerCAmelCase )
scheduler.set_timesteps(_lowerCAmelCase )
# copy over dummy past residuals
_lowerCAmelCase = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_lowerCAmelCase )
_lowerCAmelCase = scheduler_class.from_pretrained(_lowerCAmelCase )
new_scheduler.set_timesteps(_lowerCAmelCase )
# copy over dummy past residuals
_lowerCAmelCase = dummy_past_residuals[: new_scheduler.config.solver_order]
_lowerCAmelCase , _lowerCAmelCase = sample, sample
for t in range(_lowerCAmelCase , time_step + scheduler.config.solver_order + 1 ):
_lowerCAmelCase = scheduler.step(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , **_lowerCAmelCase ).prev_sample
_lowerCAmelCase = new_scheduler.step(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , **_lowerCAmelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def _snake_case ( self ) -> int:
pass
def _snake_case ( self , _lowerCAmelCase=0 , **_lowerCAmelCase ) -> Optional[int]:
_lowerCAmelCase = dict(self.forward_default_kwargs )
_lowerCAmelCase = kwargs.pop("num_inference_steps" , _lowerCAmelCase )
_lowerCAmelCase = self.dummy_sample
_lowerCAmelCase = 0.1 * sample
_lowerCAmelCase = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
_lowerCAmelCase = self.get_scheduler_config()
_lowerCAmelCase = scheduler_class(**_lowerCAmelCase )
scheduler.set_timesteps(_lowerCAmelCase )
# copy over dummy past residuals (must be after setting timesteps)
_lowerCAmelCase = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_lowerCAmelCase )
_lowerCAmelCase = scheduler_class.from_pretrained(_lowerCAmelCase )
# copy over dummy past residuals
new_scheduler.set_timesteps(_lowerCAmelCase )
# copy over dummy past residual (must be after setting timesteps)
_lowerCAmelCase = dummy_past_residuals[: new_scheduler.config.solver_order]
_lowerCAmelCase = scheduler.step(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , **_lowerCAmelCase ).prev_sample
_lowerCAmelCase = new_scheduler.step(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , **_lowerCAmelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def _snake_case ( self , _lowerCAmelCase=None , **_lowerCAmelCase ) -> Tuple:
if scheduler is None:
_lowerCAmelCase = self.scheduler_classes[0]
_lowerCAmelCase = self.get_scheduler_config(**_lowerCAmelCase )
_lowerCAmelCase = scheduler_class(**_lowerCAmelCase )
_lowerCAmelCase = self.scheduler_classes[0]
_lowerCAmelCase = self.get_scheduler_config(**_lowerCAmelCase )
_lowerCAmelCase = scheduler_class(**_lowerCAmelCase )
_lowerCAmelCase = 10
_lowerCAmelCase = self.dummy_model()
_lowerCAmelCase = self.dummy_sample_deter
scheduler.set_timesteps(_lowerCAmelCase )
for i, t in enumerate(scheduler.timesteps ):
_lowerCAmelCase = model(_lowerCAmelCase , _lowerCAmelCase )
_lowerCAmelCase = scheduler.step(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ).prev_sample
return sample
def _snake_case ( self ) -> Union[str, Any]:
_lowerCAmelCase = DPMSolverSinglestepScheduler(**self.get_scheduler_config() )
_lowerCAmelCase = 50
_lowerCAmelCase = self.dummy_model()
_lowerCAmelCase = self.dummy_sample_deter
scheduler.set_timesteps(_lowerCAmelCase )
# make sure that the first t is uneven
for i, t in enumerate(scheduler.timesteps[3:] ):
_lowerCAmelCase = model(_lowerCAmelCase , _lowerCAmelCase )
_lowerCAmelCase = scheduler.step(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ).prev_sample
_lowerCAmelCase = torch.mean(torch.abs(_lowerCAmelCase ) )
assert abs(result_mean.item() - 0.2574 ) < 1E-3
def _snake_case ( self ) -> Optional[Any]:
for timesteps in [25, 50, 100, 999, 1000]:
self.check_over_configs(num_train_timesteps=_lowerCAmelCase )
def _snake_case ( self ) -> List[Any]:
# make sure that iterating over schedulers with same config names gives same results
# for defaults
_lowerCAmelCase = DPMSolverSinglestepScheduler(**self.get_scheduler_config() )
_lowerCAmelCase = self.full_loop(scheduler=_lowerCAmelCase )
_lowerCAmelCase = torch.mean(torch.abs(_lowerCAmelCase ) )
assert abs(result_mean.item() - 0.2791 ) < 1E-3
_lowerCAmelCase = DEISMultistepScheduler.from_config(scheduler.config )
_lowerCAmelCase = DPMSolverMultistepScheduler.from_config(scheduler.config )
_lowerCAmelCase = UniPCMultistepScheduler.from_config(scheduler.config )
_lowerCAmelCase = DPMSolverSinglestepScheduler.from_config(scheduler.config )
_lowerCAmelCase = self.full_loop(scheduler=_lowerCAmelCase )
_lowerCAmelCase = torch.mean(torch.abs(_lowerCAmelCase ) )
assert abs(result_mean.item() - 0.2791 ) < 1E-3
def _snake_case ( self ) -> str:
        self.check_over_configs(thresholding=False )
for order in [1, 2, 3]:
for solver_type in ["midpoint", "heun"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=_lowerCAmelCase , prediction_type=_lowerCAmelCase , sample_max_value=_lowerCAmelCase , algorithm_type="dpmsolver++" , solver_order=_lowerCAmelCase , solver_type=_lowerCAmelCase , )
def _snake_case ( self ) -> Dict:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=_lowerCAmelCase )
def _snake_case ( self ) -> Union[str, Any]:
for algorithm_type in ["dpmsolver", "dpmsolver++"]:
for solver_type in ["midpoint", "heun"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=_lowerCAmelCase , solver_type=_lowerCAmelCase , prediction_type=_lowerCAmelCase , algorithm_type=_lowerCAmelCase , )
_lowerCAmelCase = self.full_loop(
solver_order=_lowerCAmelCase , solver_type=_lowerCAmelCase , prediction_type=_lowerCAmelCase , algorithm_type=_lowerCAmelCase , )
assert not torch.isnan(_lowerCAmelCase ).any(), "Samples have nan numbers"
def _snake_case ( self ) -> Optional[Any]:
        self.check_over_configs(lower_order_final=True )
        self.check_over_configs(lower_order_final=False )
def _snake_case ( self ) -> Optional[Any]:
self.check_over_configs(lambda_min_clipped=-float("inf" ) )
self.check_over_configs(lambda_min_clipped=-5.1 )
def _snake_case ( self ) -> str:
        self.check_over_configs(variance_type=None )
self.check_over_configs(variance_type="learned_range" )
def _snake_case ( self ) -> int:
for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
self.check_over_forward(num_inference_steps=_lowerCAmelCase , time_step=0 )
def _snake_case ( self ) -> Any:
_lowerCAmelCase = self.full_loop()
_lowerCAmelCase = torch.mean(torch.abs(_lowerCAmelCase ) )
assert abs(result_mean.item() - 0.2791 ) < 1E-3
def _snake_case ( self ) -> List[str]:
        _lowerCAmelCase = self.full_loop(use_karras_sigmas=True )
_lowerCAmelCase = torch.mean(torch.abs(_lowerCAmelCase ) )
assert abs(result_mean.item() - 0.2248 ) < 1E-3
def _snake_case ( self ) -> Union[str, Any]:
_lowerCAmelCase = self.full_loop(prediction_type="v_prediction" )
_lowerCAmelCase = torch.mean(torch.abs(_lowerCAmelCase ) )
assert abs(result_mean.item() - 0.1453 ) < 1E-3
def _snake_case ( self ) -> Any:
_lowerCAmelCase = self.full_loop(prediction_type="v_prediction" , use_karras_sigmas=_lowerCAmelCase )
_lowerCAmelCase = torch.mean(torch.abs(_lowerCAmelCase ) )
assert abs(result_mean.item() - 0.0649 ) < 1E-3
def _snake_case ( self ) -> List[Any]:
_lowerCAmelCase = self.scheduler_classes[0]
        _lowerCAmelCase = self.get_scheduler_config(thresholding=True , dynamic_thresholding_ratio=0 )
_lowerCAmelCase = scheduler_class(**_lowerCAmelCase )
_lowerCAmelCase = 10
_lowerCAmelCase = self.dummy_model()
_lowerCAmelCase = self.dummy_sample_deter.half()
scheduler.set_timesteps(_lowerCAmelCase )
for i, t in enumerate(scheduler.timesteps ):
_lowerCAmelCase = model(_lowerCAmelCase , _lowerCAmelCase )
_lowerCAmelCase = scheduler.step(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ).prev_sample
        assert sample.dtype == torch.float16
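The recurring save/load assertion in these tests reduces to one pattern; a minimal hedged sketch of it (the scheduler choice is illustrative, and the tests above additionally compare step() outputs to within 1e-5):

import tempfile

from diffusers import DPMSolverSinglestepScheduler

scheduler = DPMSolverSinglestepScheduler()
with tempfile.TemporaryDirectory() as tmpdirname:
    scheduler.save_config(tmpdirname)
    reloaded = DPMSolverSinglestepScheduler.from_pretrained(tmpdirname)
# A faithful round-trip must reproduce the configuration exactly.
assert dict(scheduler.config) == dict(reloaded.config)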
| 18 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {
"configuration_gpt_neo": ["GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoConfig", "GPTNeoOnnxConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_neo"] = [
"GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST",
"GPTNeoForCausalLM",
"GPTNeoForQuestionAnswering",
"GPTNeoForSequenceClassification",
"GPTNeoForTokenClassification",
"GPTNeoModel",
"GPTNeoPreTrainedModel",
"load_tf_weights_in_gpt_neo",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_gpt_neo"] = [
"FlaxGPTNeoForCausalLM",
"FlaxGPTNeoModel",
"FlaxGPTNeoPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neo import (
GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoForCausalLM,
GPTNeoForQuestionAnswering,
GPTNeoForSequenceClassification,
GPTNeoForTokenClassification,
GPTNeoModel,
GPTNeoPreTrainedModel,
load_tf_weights_in_gpt_neo,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
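The `_LazyModule` indirection defers the heavy torch/flax imports until an attribute is first touched. A minimal hand-rolled sketch of the same idea using PEP 562 module `__getattr__` (module and attribute names are illustrative):

# my_package/__init__.py — resolve attributes to submodules on first access.
import importlib

_ATTR_TO_MODULE = {
    "GPTNeoConfig": ".configuration_gpt_neo",
    "GPTNeoModel": ".modeling_gpt_neo",
}


def __getattr__(name):
    if name in _ATTR_TO_MODULE:
        submodule = importlib.import_module(_ATTR_TO_MODULE[name], __name__)
        return getattr(submodule, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")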
| 346 |
'''simple docstring'''
from __future__ import annotations
def mean(nums: list) -> float:
    '''
    Find the mean of a list of numbers.

    >>> mean([3, 6, 9, 12, 15, 18, 21])
    12.0
    >>> mean([])
    Traceback (most recent call last):
        ...
    ValueError: List is empty
    '''
    if not nums:
        raise ValueError("List is empty")
    return sum(nums) / len(nums)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 18 | 0 |
import argparse
import torch
from transformers import FunnelBaseModel, FunnelConfig, FunnelModel, load_tf_weights_in_funnel
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path, base_model) -> None:
    # Initialise PyTorch model from the JSON config
    config = FunnelConfig.from_json_file(config_file)
    print(f"""Building PyTorch model from configuration: {config}""")
    model = FunnelBaseModel(config) if base_model else FunnelModel(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_funnel(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f"""Save PyTorch model to {pytorch_dump_path}""")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
lowerCAmelCase__ : int =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--base_model''', action='''store_true''', help='''Whether you want just the base model (no decoder) or not.'''
)
lowerCAmelCase__ : List[Any] =parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path, args.base_model
)
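After conversion, a quick hedged check that the dumped state dict round-trips into a fresh model (the paths below are placeholders, not outputs of the script above):

import torch

from transformers import FunnelConfig, FunnelModel

config = FunnelConfig.from_json_file("./funnel/config.json")
model = FunnelModel(config)
state_dict = torch.load("./funnel/pytorch_model.bin", map_location="cpu")
# strict=False reports rather than raises on mismatched keys.
missing, unexpected = model.load_state_dict(state_dict, strict=False)
print(f"missing: {missing}, unexpected: {unexpected}")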
| 148 |
'''simple docstring'''
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class lowerCAmelCase_ ( unittest.TestCase ):
def _snake_case ( self ) -> Union[str, Any]:
_lowerCAmelCase = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
_lowerCAmelCase = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(_lowerCAmelCase )
_lowerCAmelCase = -1
_lowerCAmelCase = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_lowerCAmelCase )
_lowerCAmelCase = model.generate(_lowerCAmelCase , max_new_tokens=10 , do_sample=_lowerCAmelCase )
_lowerCAmelCase = tokenizer.decode(greedy_ids[0] )
with CaptureStdout() as cs:
_lowerCAmelCase = TextStreamer(_lowerCAmelCase )
model.generate(_lowerCAmelCase , max_new_tokens=10 , do_sample=_lowerCAmelCase , streamer=_lowerCAmelCase )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
_lowerCAmelCase = cs.out[:-1]
self.assertEqual(_lowerCAmelCase , _lowerCAmelCase )
def _snake_case ( self ) -> Union[str, Any]:
_lowerCAmelCase = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
_lowerCAmelCase = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(_lowerCAmelCase )
_lowerCAmelCase = -1
_lowerCAmelCase = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_lowerCAmelCase )
_lowerCAmelCase = model.generate(_lowerCAmelCase , max_new_tokens=10 , do_sample=_lowerCAmelCase )
_lowerCAmelCase = tokenizer.decode(greedy_ids[0] )
_lowerCAmelCase = TextIteratorStreamer(_lowerCAmelCase )
_lowerCAmelCase = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
_lowerCAmelCase = Thread(target=model.generate , kwargs=_lowerCAmelCase )
thread.start()
_lowerCAmelCase = ""
for new_text in streamer:
streamer_text += new_text
self.assertEqual(_lowerCAmelCase , _lowerCAmelCase )
def _snake_case ( self ) -> List[str]:
_lowerCAmelCase = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
_lowerCAmelCase = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(_lowerCAmelCase )
_lowerCAmelCase = -1
_lowerCAmelCase = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_lowerCAmelCase )
_lowerCAmelCase = model.generate(_lowerCAmelCase , max_new_tokens=10 , do_sample=_lowerCAmelCase )
_lowerCAmelCase = greedy_ids[:, input_ids.shape[1] :]
_lowerCAmelCase = tokenizer.decode(new_greedy_ids[0] )
with CaptureStdout() as cs:
_lowerCAmelCase = TextStreamer(_lowerCAmelCase , skip_prompt=_lowerCAmelCase )
model.generate(_lowerCAmelCase , max_new_tokens=10 , do_sample=_lowerCAmelCase , streamer=_lowerCAmelCase )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
_lowerCAmelCase = cs.out[:-1]
self.assertEqual(_lowerCAmelCase , _lowerCAmelCase )
def _snake_case ( self ) -> Dict:
# Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested
# with actual models -- the dummy models' tokenizers are not aligned with their models, and
# `skip_special_tokens=True` has no effect on them
_lowerCAmelCase = AutoTokenizer.from_pretrained("distilgpt2" )
_lowerCAmelCase = AutoModelForCausalLM.from_pretrained("distilgpt2" ).to(_lowerCAmelCase )
_lowerCAmelCase = -1
_lowerCAmelCase = torch.ones((1, 5) , device=_lowerCAmelCase ).long() * model.config.bos_token_id
with CaptureStdout() as cs:
_lowerCAmelCase = TextStreamer(_lowerCAmelCase , skip_special_tokens=_lowerCAmelCase )
model.generate(_lowerCAmelCase , max_new_tokens=1 , do_sample=_lowerCAmelCase , streamer=_lowerCAmelCase )
# The prompt contains a special token, so the streamer should not print it. As such, the output text, when
# re-tokenized, must only contain one token
_lowerCAmelCase = cs.out[:-1] # Remove the final "\n"
_lowerCAmelCase = tokenizer(_lowerCAmelCase , return_tensors="pt" )
self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) )
def _snake_case ( self ) -> Union[str, Any]:
_lowerCAmelCase = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
_lowerCAmelCase = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(_lowerCAmelCase )
_lowerCAmelCase = -1
_lowerCAmelCase = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_lowerCAmelCase )
_lowerCAmelCase = TextIteratorStreamer(_lowerCAmelCase , timeout=0.001 )
_lowerCAmelCase = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
_lowerCAmelCase = Thread(target=model.generate , kwargs=_lowerCAmelCase )
thread.start()
# The streamer will timeout after 0.001 seconds, so an exception will be raised
        with self.assertRaises(Empty ):
_lowerCAmelCase = ""
for new_text in streamer:
streamer_text += new_text
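Outside the test harness, TextIteratorStreamer is consumed the same way: generate() blocks, so it runs on a worker thread while the main thread iterates over decoded chunks. A hedged sketch (the model choice and generation settings are illustrative):

from threading import Thread

from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

tokenizer = AutoTokenizer.from_pretrained("distilgpt2")
model = AutoModelForCausalLM.from_pretrained("distilgpt2")
inputs = tokenizer(["An increasing sequence: one,"], return_tensors="pt")

streamer = TextIteratorStreamer(tokenizer, skip_prompt=True)
generation_kwargs = dict(**inputs, streamer=streamer, max_new_tokens=20)
thread = Thread(target=model.generate, kwargs=generation_kwargs)
thread.start()
for new_text in streamer:  # yields decoded text as it is generated
    print(new_text, end="", flush=True)
thread.join()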
| 18 | 0 |
'''simple docstring'''
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
UpperCamelCase : Tuple = logging.get_logger(__name__)
def rename_key(key):
    regex = R"""\w+[.]\d+"""
    pats = re.findall(regex, key)
    for pat in pats:
        key = key.replace(pat, """_""".join(pat.split(""".""")))
    return key


def rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict):
    # conv norm or layer norm
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("""scale""",)
    if (
        any("""norm""" in str_ for str_ in pt_tuple_key)
        and (pt_tuple_key[-1] == "bias")
        and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
        and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
    ):
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("""scale""",)
        return renamed_pt_tuple_key, pt_tensor
    elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("""scale""",)
        return renamed_pt_tuple_key, pt_tensor
    # embedding
    if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("""embedding""",)
        return renamed_pt_tuple_key, pt_tensor
    # conv layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("""kernel""",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor
    # linear layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("""kernel""",)
    if pt_tuple_key[-1] == "weight":
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor
    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("""weight""",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor
    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("""bias""",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor
    return pt_tuple_key, pt_tensor


def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model, init_key=42):
    # Step 1: Convert pytorch tensor to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}
    # Step 2: Since the model is stateless, get random Flax params
    random_flax_params = flax_model.init_weights(PRNGKey(init_key))
    random_flax_state_dict = flatten_dict(random_flax_params)
    flax_state_dict = {}
    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        renamed_pt_key = rename_key(pt_key)
        pt_tuple_key = tuple(renamed_pt_key.split("""."""))
        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict)
        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    F'''PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '''
                    F'''{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.''' )
        # also add unexpected weight so that warning is thrown
        flax_state_dict[flax_key] = jnp.asarray(flax_tensor)
    return unflatten_dict(flax_state_dict)
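The conv-layer branch above relies on PyTorch storing Conv2d kernels as (out, in, h, w) while Flax expects (h, w, in, out); a tiny hedged numpy check of that axis mapping (sizes are arbitrary):

import numpy as np

out_ch, in_ch, kh, kw = 8, 3, 5, 5
pt_kernel = np.zeros((out_ch, in_ch, kh, kw))  # PyTorch Conv2d layout: OIHW
flax_kernel = pt_kernel.transpose(2, 3, 1, 0)  # Flax nn.Conv layout: HWIO
assert flax_kernel.shape == (kh, kw, in_ch, out_ch)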
| 50 |
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {
"facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json",
# See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class lowerCAmelCase_ ( __magic_name__ ):
__lowerCamelCase : Union[str, Any] = "blenderbot-small"
__lowerCamelCase : Optional[Any] = ["past_key_values"]
__lowerCamelCase : str = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
def __init__( self , _lowerCAmelCase=50265 , _lowerCAmelCase=512 , _lowerCAmelCase=8 , _lowerCAmelCase=2048 , _lowerCAmelCase=16 , _lowerCAmelCase=8 , _lowerCAmelCase=2048 , _lowerCAmelCase=16 , _lowerCAmelCase=0.0 , _lowerCAmelCase=0.0 , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase="gelu" , _lowerCAmelCase=512 , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.0 , _lowerCAmelCase=0.0 , _lowerCAmelCase=0.02 , _lowerCAmelCase=1 , _lowerCAmelCase=False , _lowerCAmelCase=0 , _lowerCAmelCase=1 , _lowerCAmelCase=2 , _lowerCAmelCase=2 , **_lowerCAmelCase , ) -> Dict:
_lowerCAmelCase = vocab_size
_lowerCAmelCase = max_position_embeddings
_lowerCAmelCase = d_model
_lowerCAmelCase = encoder_ffn_dim
_lowerCAmelCase = encoder_layers
_lowerCAmelCase = encoder_attention_heads
_lowerCAmelCase = decoder_ffn_dim
_lowerCAmelCase = decoder_layers
_lowerCAmelCase = decoder_attention_heads
_lowerCAmelCase = dropout
_lowerCAmelCase = attention_dropout
_lowerCAmelCase = activation_dropout
_lowerCAmelCase = activation_function
_lowerCAmelCase = init_std
_lowerCAmelCase = encoder_layerdrop
_lowerCAmelCase = decoder_layerdrop
_lowerCAmelCase = use_cache
_lowerCAmelCase = encoder_layers
_lowerCAmelCase = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
pad_token_id=_lowerCAmelCase , bos_token_id=_lowerCAmelCase , eos_token_id=_lowerCAmelCase , is_encoder_decoder=_lowerCAmelCase , decoder_start_token_id=_lowerCAmelCase , forced_eos_token_id=_lowerCAmelCase , **_lowerCAmelCase , )
class lowerCAmelCase_ ( __magic_name__ ):
@property
def _snake_case ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task in ["default", "seq2seq-lm"]:
_lowerCAmelCase = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
] )
if self.use_past:
_lowerCAmelCase = {0: "batch"}
_lowerCAmelCase = {0: "batch", 1: "past_decoder_sequence + sequence"}
else:
_lowerCAmelCase = {0: "batch", 1: "decoder_sequence"}
_lowerCAmelCase = {0: "batch", 1: "decoder_sequence"}
if self.use_past:
self.fill_with_past_key_values_(_lowerCAmelCase , direction="inputs" )
elif self.task == "causal-lm":
# TODO: figure this case out.
_lowerCAmelCase = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
] )
if self.use_past:
_lowerCAmelCase , _lowerCAmelCase = self.num_layers
for i in range(_lowerCAmelCase ):
_lowerCAmelCase = {0: "batch", 2: "past_sequence + sequence"}
_lowerCAmelCase = {0: "batch", 2: "past_sequence + sequence"}
else:
_lowerCAmelCase = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
] )
return common_inputs
@property
def _snake_case ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task in ["default", "seq2seq-lm"]:
_lowerCAmelCase = super().outputs
else:
_lowerCAmelCase = super(_lowerCAmelCase , self ).outputs
if self.use_past:
_lowerCAmelCase , _lowerCAmelCase = self.num_layers
for i in range(_lowerCAmelCase ):
_lowerCAmelCase = {0: "batch", 2: "past_sequence + sequence"}
_lowerCAmelCase = {0: "batch", 2: "past_sequence + sequence"}
return common_outputs
def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase = -1 , _lowerCAmelCase = -1 , _lowerCAmelCase = False , _lowerCAmelCase = None , ) -> Mapping[str, Any]:
_lowerCAmelCase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
# Generate decoder inputs
_lowerCAmelCase = seq_length if not self.use_past else 1
_lowerCAmelCase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
_lowerCAmelCase = {f'''decoder_{name}''': tensor for name, tensor in decoder_inputs.items()}
_lowerCAmelCase = dict(**_lowerCAmelCase , **_lowerCAmelCase )
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
_lowerCAmelCase , _lowerCAmelCase = common_inputs["input_ids"].shape
_lowerCAmelCase = common_inputs["decoder_input_ids"].shape[1]
_lowerCAmelCase , _lowerCAmelCase = self.num_attention_heads
_lowerCAmelCase = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
_lowerCAmelCase = decoder_seq_length + 3
_lowerCAmelCase = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
_lowerCAmelCase = torch.cat(
[common_inputs["decoder_attention_mask"], torch.ones(_lowerCAmelCase , _lowerCAmelCase )] , dim=1 )
_lowerCAmelCase = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
_lowerCAmelCase , _lowerCAmelCase = self.num_layers
_lowerCAmelCase = min(_lowerCAmelCase , _lowerCAmelCase )
_lowerCAmelCase = max(_lowerCAmelCase , _lowerCAmelCase ) - min_num_layers
_lowerCAmelCase = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"
for _ in range(_lowerCAmelCase ):
common_inputs["past_key_values"].append(
(
torch.zeros(_lowerCAmelCase ),
torch.zeros(_lowerCAmelCase ),
torch.zeros(_lowerCAmelCase ),
torch.zeros(_lowerCAmelCase ),
) )
# TODO: test this.
_lowerCAmelCase = encoder_shape if remaining_side_name == "encoder" else decoder_shape
for _ in range(_lowerCAmelCase , _lowerCAmelCase ):
common_inputs["past_key_values"].append((torch.zeros(_lowerCAmelCase ), torch.zeros(_lowerCAmelCase )) )
return common_inputs
def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase = -1 , _lowerCAmelCase = -1 , _lowerCAmelCase = False , _lowerCAmelCase = None , ) -> Mapping[str, Any]:
_lowerCAmelCase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
_lowerCAmelCase , _lowerCAmelCase = common_inputs["input_ids"].shape
# Not using the same length for past_key_values
_lowerCAmelCase = seqlen + 2
_lowerCAmelCase , _lowerCAmelCase = self.num_layers
_lowerCAmelCase , _lowerCAmelCase = self.num_attention_heads
_lowerCAmelCase = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
_lowerCAmelCase = common_inputs["attention_mask"].dtype
_lowerCAmelCase = torch.cat(
[common_inputs["attention_mask"], torch.ones(_lowerCAmelCase , _lowerCAmelCase , dtype=_lowerCAmelCase )] , dim=1 )
_lowerCAmelCase = [
(torch.zeros(_lowerCAmelCase ), torch.zeros(_lowerCAmelCase )) for _ in range(_lowerCAmelCase )
]
return common_inputs
def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase = -1 , _lowerCAmelCase = -1 , _lowerCAmelCase = False , _lowerCAmelCase = None , ) -> Mapping[str, Any]:
# Copied from OnnxConfig.generate_dummy_inputs
# Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
_lowerCAmelCase = compute_effective_axis_dimension(
_lowerCAmelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
_lowerCAmelCase = tokenizer.num_special_tokens_to_add(_lowerCAmelCase )
_lowerCAmelCase = compute_effective_axis_dimension(
_lowerCAmelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=_lowerCAmelCase )
# Generate dummy inputs according to compute batch and sequence
_lowerCAmelCase = [" ".join([tokenizer.unk_token] ) * seq_length] * batch_size
_lowerCAmelCase = dict(tokenizer(_lowerCAmelCase , return_tensors=_lowerCAmelCase ) )
return common_inputs
def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase = -1 , _lowerCAmelCase = -1 , _lowerCAmelCase = False , _lowerCAmelCase = None , ) -> Mapping[str, Any]:
if self.task in ["default", "seq2seq-lm"]:
_lowerCAmelCase = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
_lowerCAmelCase , batch_size=_lowerCAmelCase , seq_length=_lowerCAmelCase , is_pair=_lowerCAmelCase , framework=_lowerCAmelCase )
elif self.task == "causal-lm":
_lowerCAmelCase = self._generate_dummy_inputs_for_causal_lm(
_lowerCAmelCase , batch_size=_lowerCAmelCase , seq_length=_lowerCAmelCase , is_pair=_lowerCAmelCase , framework=_lowerCAmelCase )
else:
_lowerCAmelCase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
_lowerCAmelCase , batch_size=_lowerCAmelCase , seq_length=_lowerCAmelCase , is_pair=_lowerCAmelCase , framework=_lowerCAmelCase )
return common_inputs
def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Dict:
if self.task in ["default", "seq2seq-lm"]:
_lowerCAmelCase = super()._flatten_past_key_values_(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
else:
_lowerCAmelCase = super(_lowerCAmelCase , self )._flatten_past_key_values_(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
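All dummy past_key_values built above share one tensor layout; a hedged sketch of that convention with illustrative sizes (these numbers are examples, not the config defaults):

import torch

batch, num_heads, past_len, hidden_size, num_layers = 2, 16, 11, 512, 8
head_dim = hidden_size // num_heads
# One (key, value) pair of zeros per decoder layer, each shaped
# (batch, num_attention_heads, past_sequence_length, head_dim).
past_key_values = [
    (
        torch.zeros(batch, num_heads, past_len, head_dim),
        torch.zeros(batch, num_heads, past_len, head_dim),
    )
    for _ in range(num_layers)
]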
| 18 | 0 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_glpn import GLPNImageProcessor
A__ : Tuple = logging.get_logger(__name__)
class GLPNFeatureExtractor(GLPNImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        """Deprecated alias kept for backwards compatibility."""
        warnings.warn(
            '''The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
            ''' use GLPNImageProcessor instead.''',
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 353 |
'''simple docstring'''
import re
import string
import numpy as np
import datasets
_SCREAMING_SNAKE_CASE = "\nReturns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.\n"
_SCREAMING_SNAKE_CASE = "\nArgs:\n predictions: List of predicted texts.\n references: List of reference texts.\n regexes_to_ignore: List, defaults to None. Regex expressions of characters to\n ignore when calculating the exact matches. Note: these regexes are removed\n from the input data before the changes based on the options below (e.g. ignore_case,\n ignore_punctuation, ignore_numbers) are applied.\n ignore_case: Boolean, defaults to False. If true, turns everything\n to lowercase so that capitalization differences are ignored.\n ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\n ignore_numbers: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\nReturns:\n exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.\nExamples:\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results[\"exact_match\"], 1))\n 25.0\n\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results[\"exact_match\"], 1))\n 50.0\n\n\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results[\"exact_match\"], 1))\n 75.0\n\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)\n >>> print(round(results[\"exact_match\"], 1))\n 100.0\n\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"The cat sat on the mat.\", \"Theaters are great.\", \"It's like comparing oranges and apples.\"]\n >>> preds = [\"The cat sat on the mat?\", \"Theaters are great.\", \"It's like comparing apples and oranges.\"]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results[\"exact_match\"], 1))\n 33.3\n\n"
_SCREAMING_SNAKE_CASE = "\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class ExactMatch(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            reference_urls=[],
        )

    def _compute(
        self,
        predictions,
        references,
        regexes_to_ignore=None,
        ignore_case=False,
        ignore_punctuation=False,
        ignore_numbers=False,
    ):
        if regexes_to_ignore is not None:
            for s in regexes_to_ignore:
                predictions = np.array([re.sub(s, "", x) for x in predictions])
                references = np.array([re.sub(s, "", x) for x in references])
        else:
            predictions = np.asarray(predictions)
            references = np.asarray(references)

        if ignore_case:
            predictions = np.char.lower(predictions)
            references = np.char.lower(references)

        if ignore_punctuation:
            repl_table = string.punctuation.maketrans("", "", string.punctuation)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        if ignore_numbers:
            repl_table = string.digits.maketrans("", "", string.digits)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        score_list = predictions == references
        return {"exact_match": np.mean(score_list) * 100}
| 18 | 0 |
"""simple docstring"""
from __future__ import annotations
from bisect import bisect_left
from functools import total_ordering
from heapq import merge
@total_ordering
class Stack(list):
    def __lt__(self, other):
        return self[-1] < other[-1]

    def __eq__(self, other):
        return self[-1] == other[-1]


def patience_sort(collection):
    stacks = []
    # sort into stacks
    for element in collection:
        new_stack = Stack([element])
        i = bisect_left(stacks, new_stack)
        if i != len(stacks):
            stacks[i].append(element)
        else:
            stacks.append(new_stack)
    # use a heap-based merge to merge the stacks efficiently
    collection[:] = merge(*(reversed(stack) for stack in stacks))
    return collection


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(patience_sort(unsorted))
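# Example (sketch): patience_sort sorts in place and also returns the list, e.g.
#   patience_sort([1, 9, 5, 21, 17, 6]) -> [1, 5, 6, 9, 17, 21]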
| 528 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_yolos import YolosImageProcessor
logger = logging.get_logger(__name__)


class YolosFeatureExtractor(YolosImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use YolosImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
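# Migration sketch (checkpoint name is illustrative, not taken from this file):
#   from transformers import YolosImageProcessor
#   image_processor = YolosImageProcessor.from_pretrained("hustvl/yolos-small")
#   inputs = image_processor(images=image, return_tensors="pt")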
| 18 | 0 |
import copy
from typing import Dict, List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/mask2former-swin-small-coco-instance": (
        "https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json"
    )
    # See all Mask2Former models at https://huggingface.co/models?filter=mask2former
}

logger = logging.get_logger(__name__)
class Mask2FormerConfig(PretrainedConfig):
    model_type = "mask2former"
    backbones_supported = ["swin"]
    attribute_map = {"hidden_size": "hidden_dim"}

    def __init__(
        self,
        backbone_config: Optional[Dict] = None,
        feature_size: int = 256,
        mask_feature_size: int = 256,
        hidden_dim: int = 256,
        encoder_feedforward_dim: int = 1_024,
        activation_function: str = "relu",
        encoder_layers: int = 6,
        decoder_layers: int = 10,
        num_attention_heads: int = 8,
        dropout: float = 0.0,
        dim_feedforward: int = 2_048,
        pre_norm: bool = False,
        enforce_input_projection: bool = False,
        common_stride: int = 4,
        ignore_value: int = 255,
        num_queries: int = 100,
        no_object_weight: float = 0.1,
        class_weight: float = 2.0,
        mask_weight: float = 5.0,
        dice_weight: float = 5.0,
        train_num_points: int = 12_544,
        oversample_ratio: float = 3.0,
        importance_sample_ratio: float = 0.75,
        init_std: float = 0.02,
        init_xavier_std: float = 1.0,
        use_auxiliary_loss: bool = True,
        feature_strides: List[int] = [4, 8, 16, 32],
        output_auxiliary_logits: bool = None,
        **kwargs,
    ):
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `Swin` backbone.")
            backbone_config = CONFIG_MAPPING["swin"](
                image_size=224,
                in_channels=3,
                patch_size=4,
                embed_dim=96,
                depths=[2, 2, 18, 2],
                num_heads=[3, 6, 12, 24],
                window_size=7,
                drop_path_rate=0.3,
                use_absolute_embeddings=False,
                out_features=["stage1", "stage2", "stage3", "stage4"],
            )

        if isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.pop("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)

        # verify that the backbone is supported
        if backbone_config.model_type not in self.backbones_supported:
            logger.warning_once(
                f"Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. "
                f"Supported model types: {','.join(self.backbones_supported)}"
            )

        self.backbone_config = backbone_config
        self.feature_size = feature_size
        self.mask_feature_size = mask_feature_size
        self.hidden_dim = hidden_dim
        self.encoder_feedforward_dim = encoder_feedforward_dim
        self.activation_function = activation_function
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.num_attention_heads = num_attention_heads
        self.dropout = dropout
        self.dim_feedforward = dim_feedforward
        self.pre_norm = pre_norm
        self.enforce_input_projection = enforce_input_projection
        self.common_stride = common_stride
        self.ignore_value = ignore_value
        self.num_queries = num_queries
        self.no_object_weight = no_object_weight
        self.class_weight = class_weight
        self.mask_weight = mask_weight
        self.dice_weight = dice_weight
        self.train_num_points = train_num_points
        self.oversample_ratio = oversample_ratio
        self.importance_sample_ratio = importance_sample_ratio
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.use_auxiliary_loss = use_auxiliary_loss
        self.feature_strides = feature_strides
        self.output_auxiliary_logits = output_auxiliary_logits
        self.num_hidden_layers = decoder_layers
        super().__init__(**kwargs)

    @classmethod
    def from_backbone_config(cls, backbone_config: PretrainedConfig, **kwargs):
        return cls(
            backbone_config=backbone_config,
            **kwargs,
        )

    def to_dict(self) -> Dict[str, any]:
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
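# Usage sketch (assumes a Swin backbone config; values illustrative):
#   from transformers import SwinConfig
#   backbone = SwinConfig(out_features=["stage1", "stage2", "stage3", "stage4"])
#   config = Mask2FormerConfig.from_backbone_config(backbone)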
| 412 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "tiiuae/falcon-40b": "https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json",
    "tiiuae/falcon-7b": "https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json",
}


class FalconConfig(PretrainedConfig):
    model_type = "falcon"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=65024,
        hidden_size=4544,
        num_hidden_layers=32,
        num_attention_heads=71,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        hidden_dropout=0.0,
        attention_dropout=0.0,
        num_kv_heads=None,
        alibi=False,
        new_decoder_architecture=False,
        multi_query=True,
        parallel_attn=True,
        bias=False,
        bos_token_id=11,
        eos_token_id=11,
        **kwargs,
    ) -> None:
        self.vocab_size = vocab_size
        # Backward compatibility with n_embed kwarg
        n_embed = kwargs.pop("n_embed", None)
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.num_kv_heads = num_attention_heads if num_kv_heads is None else num_kv_heads
        self.alibi = alibi
        self.new_decoder_architecture = new_decoder_architecture
        self.multi_query = multi_query  # Ignored when new_decoder_architecture is True
        self.parallel_attn = parallel_attn
        self.bias = bias
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @property
    def head_dim(self):
        return self.hidden_size // self.num_attention_heads

    @property
    def rotary(self):
        return not self.alibi
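# Sanity sketch: with the defaults above, head_dim == 4544 // 71 == 64, and
# rotary position embeddings are used whenever alibi is False.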
| 18 | 0 |
"""simple docstring"""
from typing import Any
class Node:
    def __init__(self, data: Any):
        self.data = data
        self.next = None


class LinkedList:
    def __init__(self):
        self.head = None

    def print_list(self):
        temp = self.head
        while temp is not None:
            print(temp.data, end=" ")
            temp = temp.next
        print()

    def push(self, new_data: Any):
        new_node = Node(new_data)
        new_node.next = self.head
        self.head = new_node

    def swap_nodes(self, node_data_1, node_data_2):
        if node_data_1 == node_data_2:
            return
        node_1 = self.head
        while node_1 is not None and node_1.data != node_data_1:
            node_1 = node_1.next
        node_2 = self.head
        while node_2 is not None and node_2.data != node_data_2:
            node_2 = node_2.next
        if node_1 is None or node_2 is None:
            return
        node_1.data, node_2.data = node_2.data, node_1.data


if __name__ == "__main__":
    ll = LinkedList()
    for i in range(5, 0, -1):
        ll.push(i)
    ll.print_list()
    ll.swap_nodes(1, 4)
    print("After swapping")
    ll.print_list()
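# Design note: swap_nodes exchanges the payloads rather than relinking the nodes,
# which is an O(n) search plus an O(1) swap and leaves the list structure untouched.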
| 341 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/deit-base-distilled-patch16-224": (
        "https://huggingface.co/facebook/deit-base-patch16-224/resolve/main/config.json"
    ),
    # See all DeiT models at https://huggingface.co/models?filter=deit
}


class DeiTConfig(PretrainedConfig):
    model_type = "deit"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        encoder_stride=16,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.encoder_stride = encoder_stride


class DeiTOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
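# Usage sketch (hypothetical): inspect the dynamic axes the ONNX export expects.
#   onnx_config = DeiTOnnxConfig(DeiTConfig())
#   print(onnx_config.inputs)  # pixel_values with dynamic batch/channels/height/width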
| 18 | 0 |
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class TestTokenizationLED(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LEDTokenizer
    rust_tokenizer_class = LEDTokenizerFast
    test_rust_tokenizer = True
    def setUp(self):
        super().setUp()
        vocab = [
            'l',
            'o',
            'w',
            'e',
            'r',
            's',
            't',
            'i',
            'd',
            'n',
            '\u0120',
            '\u0120l',
            '\u0120n',
            '\u0120lo',
            '\u0120low',
            'er',
            '\u0120lowest',
            '\u0120newer',
            '\u0120wider',
            '<unk>',
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
        self.special_tokens_map = {'unk_token': '<unk>'}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['merges_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as fp:
            fp.write(json.dumps(vocab_tokens) + '\n')
        with open(self.merges_file, 'w', encoding='utf-8') as fp:
            fp.write('\n'.join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return "lower newer", "lower newer"

    @cached_property
    def default_tokenizer(self):
        return LEDTokenizer.from_pretrained('allenai/led-base-16384')

    @cached_property
    def default_tokenizer_fast(self):
        return LEDTokenizerFast.from_pretrained('allenai/led-base-16384')
    @require_torch
    def test_prepare_batch(self):
        src_text = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
        expected_src_tokens = [0, 250, 251, 17_818, 13, 39_186, 1_938, 4, 2]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, max_length=len(expected_src_tokens), padding=True, return_tensors='pt')
            self.assertIsInstance(batch, BatchEncoding)
            self.assertEqual((2, 9), batch.input_ids.shape)
            self.assertEqual((2, 9), batch.attention_mask.shape)
            result = batch.input_ids.tolist()[0]
            self.assertListEqual(expected_src_tokens, result)

    @require_torch
    def test_prepare_batch_empty_target_text(self):
        src_text = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, padding=True, return_tensors='pt')
            self.assertIn('input_ids', batch)
            self.assertIn('attention_mask', batch)
            self.assertNotIn('labels', batch)
            self.assertNotIn('decoder_attention_mask', batch)

    @require_torch
    def test_tokenizer_as_target_length(self):
        tgt_text = [
            'Summary of the text.',
            'Another summary.',
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            targets = tokenizer(text_target=tgt_text, max_length=32, padding='max_length', return_tensors='pt')
            self.assertEqual(32, targets['input_ids'].shape[1])

    @require_torch
    def test_prepare_batch_not_longer_than_maxlen(self):
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(
                ['I am a small frog' * 1_024, 'I am a small frog'], padding=True, truncation=True, return_tensors='pt')
            self.assertIsInstance(batch, BatchEncoding)
            self.assertEqual(batch.input_ids.shape, (2, 5_122))

    @require_torch
    def test_special_tokens(self):
        src_text = ['A long paragraph for summarization.']
        tgt_text = [
            'Summary of the text.',
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            inputs = tokenizer(src_text, return_tensors='pt')
            targets = tokenizer(text_target=tgt_text, return_tensors='pt')
            input_ids = inputs['input_ids']
            labels = targets['input_ids']
            self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item())
            self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item())

    @require_torch
    def test_global_attention_mask(self):
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            src_text = ['Summary of the text.', 'Another summary.']
            expected_global_attention_mask = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]
            encoded_output = tokenizer(src_text, padding=False)
            encoded_output['global_attention_mask'] = [[0] * len(x) for x in encoded_output['input_ids']]
            outputs = tokenizer.pad(encoded_output)
            self.assertSequenceEqual(outputs['global_attention_mask'], expected_global_attention_mask)

    def test_pretokenized_inputs(self):
        pass

    def test_embeded_special_tokens(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = 'A, <mask> AllenNLP sentence.'
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                self.assertEqual(sum(tokens_r['token_type_ids']), sum(tokens_p['token_type_ids']))
                self.assertEqual(
                    sum(tokens_r['attention_mask']) / len(tokens_r['attention_mask']),
                    sum(tokens_p['attention_mask']) / len(tokens_p['attention_mask']),
                )
                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r['input_ids'])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p['input_ids'])
                self.assertSequenceEqual(tokens_p['input_ids'], [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2])
                self.assertSequenceEqual(tokens_r['input_ids'], [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2])
                self.assertSequenceEqual(
                    tokens_p_str, ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'])
                self.assertSequenceEqual(
                    tokens_r_str, ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'])
| 576 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_mctct": ["MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MCTCTConfig"],
    "feature_extraction_mctct": ["MCTCTFeatureExtractor"],
    "processing_mctct": ["MCTCTProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mctct"] = [
        "MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MCTCTForCTC",
        "MCTCTModel",
        "MCTCTPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
from .feature_extraction_mctct import MCTCTFeatureExtractor
from .processing_mctct import MCTCTProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
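# Design note: the _LazyModule shim means that, outside of type checking, the
# torch-dependent submodules are only imported when one of their names is first
# accessed, keeping `import transformers` cheap when torch is absent.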
| 18 | 0 |
'''simple docstring'''
import json
import os
from typing import Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'vocab.json'}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'mgp-str': 'https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json',
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {'mgp-str': 27}


class MgpstrTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(self, vocab_file, unk_token="[GO]", bos_token="[GO]", eos_token="[s]", pad_token="[GO]", **kwargs):
        super().__init__(
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            **kwargs,
        )
        with open(vocab_file, encoding="""utf-8""") as vocab_handle:
            self.vocab = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.vocab.items()}

    @property
    def vocab_size(self):
        return len(self.vocab)

    def get_vocab(self):
        return dict(self.vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        char_tokens = []
        for s in text:
            char_tokens.extend(s)
        return char_tokens

    def _convert_token_to_id(self, token):
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error("""Vocabulary path ({}) should be a directory""".format(save_directory))
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""]
        )
        with open(vocab_file, """w""", encoding="""utf-8""") as f:
            f.write(json.dumps(self.vocab, indent=2, sort_keys=True, ensure_ascii=False) + """\n""")
        return (vocab_file,)
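# Behavior sketch (hypothetical vocab file): tokenization is character level, e.g.
#   MgpstrTokenizer(vocab_file="vocab.json")._tokenize("abc") -> ["a", "b", "c"]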
| 533 |
'''simple docstring'''
from typing import Dict, Iterable, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class LevitImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_MEAN,
        image_std: Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_STD,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[ChannelDimension] = None,
        **kwargs,
    ) -> np.ndarray:
        size_dict = get_size_dict(size, default_to_square=False)
        # size_dict is a dict with either keys "height" and "width" or "shortest_edge"
        if "shortest_edge" in size:
            shortest_edge = int((256 / 224) * size["shortest_edge"])
            output_size = get_resize_output_image_size(image, size=shortest_edge, default_to_square=False)
            size_dict = {"height": output_size[0], "width": output_size[1]}
        if "height" not in size_dict or "width" not in size_dict:
            raise ValueError(
                f"Size dict must have keys 'height' and 'width' or 'shortest_edge'. Got {size_dict.keys()}"
            )
        return resize(
            image, size=(size_dict["height"], size_dict["width"]), resample=resample, data_format=data_format, **kwargs
        )

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[ChannelDimension] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size dict must have keys 'height' and 'width'. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[ChannelDimension] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, Iterable[float]],
        std: Union[float, Iterable[float]],
        data_format: Optional[ChannelDimension] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = None,
        do_center_crop: Optional[bool] = None,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, Iterable[float]]] = None,
        image_std: Optional[Union[float, Iterable[float]]] = None,
        return_tensors: Optional[TensorType] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image, size, resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image, crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image, rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image, image_mean, image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
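# Processing sketch: for size={"shortest_edge": 224} an image is first resized so
# its short side is int(256 / 224 * 224) == 256, then center-cropped to 224x224,
# then rescaled by 1/255 and normalized with the ImageNet default mean/std.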
| 18 | 0 |
"""simple docstring"""
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def tokenize(example):
    output = {}
    output["input_ids"] = tokenizer(example["content"], truncation=False)["input_ids"]
    output["ratio_char_token"] = len(example["content"]) / len(output["input_ids"])
    return output


parser = HfArgumentParser(PretokenizationArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)

t_start = time.time()
ds = load_dataset(args.dataset_name, split='''train''')
print(F'''Dataset loaded in {time.time()-t_start:.2f}s''')
t_start = time.time()
ds = ds.map(
tokenize,
num_proc=args.num_workers,
remove_columns=[
'''repo_name''',
'''path''',
'''copies''',
'''size''',
'''content''',
'''license''',
'''hash''',
'''line_mean''',
'''line_max''',
'''alpha_frac''',
'''autogenerated''',
],
)
print(F'''Dataset tokenized in {time.time()-t_start:.2f}s''')
t_start = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(F'''Data pushed to the hub in {time.time()-t_start:.2f}s''')
| 633 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DONUT_SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "naver-clova-ix/donut-base": "https://huggingface.co/naver-clova-ix/donut-base/resolve/main/config.json",
    # See all Donut models at https://huggingface.co/models?filter=donut-swin
}


class DonutSwinConfig(PretrainedConfig):
    model_type = "donut-swin"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
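# Derivation sketch: with embed_dim=96 and depths=[2, 2, 6, 2] (four stages),
# hidden_size = 96 * 2 ** 3 == 768, which VisionEncoderDecoderModel reads directly.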
| 18 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DONUT_SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    '''naver-clova-ix/donut-base''': '''https://huggingface.co/naver-clova-ix/donut-base/resolve/main/config.json''',
    # See all Donut models at https://huggingface.co/models?filter=donut-swin
}


class DonutSwinConfig(PretrainedConfig):
    model_type = "donut-swin"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
| 694 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/swinv2-tiny-patch4-window8-256": (
        "https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json"
    ),
}


class Swinv2Config(PretrainedConfig):
    model_type = "swinv2"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.pretrained_window_sizes = (0, 0, 0, 0)
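# Note: pretrained_window_sizes defaults to (0, 0, 0, 0), i.e. no per-stage
# pretrained window size is assumed unless it is set explicitly after init.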
| 18 | 0 |
"""simple docstring"""
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ConditionalDetrImageProcessor
class ConditionalDetrImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1_333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
    def get_expected_values(self, image_inputs, batched=False):
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]
        return expected_height, expected_width
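    # Worked example (illustrative numbers): for a 400x300 (w x h) image and
    # shortest_edge=18, w > h gives expected_height = 18 and
    # expected_width = int(18 * 400 / 300) == 24, preserving the aspect ratio.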
@require_torch
@require_vision
class ConditionalDetrImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ConditionalDetrImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ConditionalDetrImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 1_8, "longest_edge": 1_333})
        self.assertEqual(image_processor.do_pad, True)
        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=4_2, max_size=8_4, pad_and_return_pixel_mask=False
        )
        self.assertEqual(image_processor.size, {"shortest_edge": 4_2, "longest_edge": 8_4})
        self.assertEqual(image_processor.do_pad, False)

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width)
        )
        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width)
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width)
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())
        target = {"image_id": 3_9_7_6_9, "annotations": target}
        # encode them
        image_processing = ConditionalDetrImageProcessor.from_pretrained("microsoft/conditional-detr-resnet-50")
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")
        # verify pixel values
        expected_shape = torch.Size([1, 3, 8_0_0, 1_0_6_6])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))
        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([3_9_7_6_9])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([7_5, 7_5, 6_3, 6_5, 1_7, 1_7])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([4_8_0, 6_4_0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([8_0_0, 1_0_6_6])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))

    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())
        target = {"file_name": "000000039769.png", "image_id": 3_9_7_6_9, "segments_info": target}
        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")
        # encode them
        image_processing = ConditionalDetrImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")
        # verify pixel values
        expected_shape = torch.Size([1, 3, 8_0_0, 1_0_6_6])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))
        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([3_9_7_6_9])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([1_7, 1_7, 6_3, 7_5, 7_5, 9_3])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 8_2_2_8_7_3
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([4_8_0, 6_4_0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([8_0_0, 1_0_6_6])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
| 346 |
'''simple docstring'''
import gc
import unittest
import torch
from parameterized import parameterized
from diffusers import AutoencoderKL
from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class AutoencoderKLTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = AutoencoderKL
    main_input_name = "sample"
    base_precision = 1e-2

    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)
        image = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        return {"sample": image}

    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_forward_signature(self):
        pass

    def test_training(self):
        pass
@unittest.skipIf(torch_device == "mps" , "Gradient checkpointing skipped on MPS" )
def _snake_case ( self ) -> str:
# enable deterministic behavior for gradient checkpointing
_lowerCAmelCase , _lowerCAmelCase = self.prepare_init_args_and_inputs_for_common()
_lowerCAmelCase = self.model_class(**_lowerCAmelCase )
model.to(_lowerCAmelCase )
assert not model.is_gradient_checkpointing and model.training
_lowerCAmelCase = model(**_lowerCAmelCase ).sample
# run the backwards pass on the model. For backwards pass, for simplicity purpose,
# we won't calculate the loss and rather backprop on out.sum()
model.zero_grad()
_lowerCAmelCase = torch.randn_like(_lowerCAmelCase )
_lowerCAmelCase = (out - labels).mean()
loss.backward()
# re-instantiate the model now enabling gradient checkpointing
_lowerCAmelCase = self.model_class(**_lowerCAmelCase )
# clone model
model_a.load_state_dict(model.state_dict() )
model_a.to(_lowerCAmelCase )
model_a.enable_gradient_checkpointing()
assert model_a.is_gradient_checkpointing and model_a.training
_lowerCAmelCase = model_a(**_lowerCAmelCase ).sample
# run the backwards pass on the model. For backwards pass, for simplicity purpose,
# we won't calculate the loss and rather backprop on out.sum()
model_a.zero_grad()
_lowerCAmelCase = (out_a - labels).mean()
loss_a.backward()
# compare the output and parameters gradients
self.assertTrue((loss - loss_a).abs() < 1E-5 )
_lowerCAmelCase = dict(model.named_parameters() )
_lowerCAmelCase = dict(model_a.named_parameters() )
for name, param in named_params.items():
self.assertTrue(torch_all_close(param.grad.data , named_params_a[name].grad.data , atol=5E-5 ) )
    def test_from_pretrained_hub(self):
        model, loading_info = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy", output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)
        model.to(torch_device)
        image = model(**self.dummy_input)
        assert image is not None, "Make sure output is not None"

    def test_output_pretrained(self):
        model = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy")
        model = model.to(torch_device)
        model.eval()
        if torch_device == "mps":
            generator = torch.manual_seed(0)
        else:
            generator = torch.Generator(device=torch_device).manual_seed(0)
        image = torch.randn(
            1,
            model.config.in_channels,
            model.config.sample_size,
            model.config.sample_size,
            generator=torch.manual_seed(0),
        )
        image = image.to(torch_device)
        with torch.no_grad():
            output = model(image, sample_posterior=True, generator=generator).sample
        output_slice = output[0, -1, -3:, -3:].flatten().cpu()
        # Since the VAE Gaussian prior's generator is seeded on the appropriate device,
        # the expected output slices are not the same for CPU and GPU.
        if torch_device == "mps":
            expected_output_slice = torch.tensor(
                [
                    -4.0078e-01,
                    -3.8323e-04,
                    -1.2681e-01,
                    -1.1462e-01,
                    2.0095e-01,
                    1.0893e-01,
                    -8.8247e-02,
                    -3.0361e-01,
                    -9.8644e-03,
                ]
            )
        elif torch_device == "cpu":
            expected_output_slice = torch.tensor(
                [-0.1352, 0.0878, 0.0419, -0.0818, -0.1069, 0.0688, -0.1458, -0.4446, -0.0026]
            )
        else:
            expected_output_slice = torch.tensor(
                [-0.2421, 0.4642, 0.2507, -0.0438, 0.0682, 0.3160, -0.2018, -0.0727, 0.2485]
            )
        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))
@slow
class AutoencoderKLIntegrationTests(unittest.TestCase):
    def get_file_format(self, seed, shape):
        return f"gaussian_noise_s={seed}_shape={'_'.join([str(s) for s in shape])}.npy"

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_sd_image(self, seed=0, shape=(4, 3, 512, 512), fp16=False):
        dtype = torch.float16 if fp16 else torch.float32
        image = torch.from_numpy(load_hf_numpy(self.get_file_format(seed, shape))).to(torch_device).to(dtype)
        return image

    def get_sd_vae_model(self, model_id="CompVis/stable-diffusion-v1-4", fp16=False):
        revision = "fp16" if fp16 else None
        torch_dtype = torch.float16 if fp16 else torch.float32
        model = AutoencoderKL.from_pretrained(
            model_id,
            subfolder="vae",
            torch_dtype=torch_dtype,
            revision=revision,
        )
        model.to(torch_device).eval()
        return model

    def get_generator(self, seed=0):
        if torch_device == "mps":
            return torch.manual_seed(seed)
        return torch.Generator(device=torch_device).manual_seed(seed)
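    # Note: fp16 checkpoints are loaded from the "fp16" revision with torch.float16;
    # the fp16 tests below cast output slices back to float32 via .float() before
    # comparing against the expected values.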
    @parameterized.expand(
        [
            # fmt: off
            [33, [-0.1603, 0.9878, -0.0495, -0.0790, -0.2709, 0.8375, -0.2060, -0.0824], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]],
            [47, [-0.2376, 0.1168, 0.1332, -0.4840, -0.2508, -0.0791, -0.0493, -0.4089], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]],
            # fmt: on
        ]
    )
    def test_stable_diffusion(self, seed, expected_slice, expected_slice_mps):
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed)
        generator = self.get_generator(seed)
        with torch.no_grad():
            sample = model(image, generator=generator, sample_posterior=True).sample
        assert sample.shape == image.shape
        output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice)
        assert torch_all_close(output_slice, expected_output_slice, atol=3e-3)

    @parameterized.expand(
        [
            # fmt: off
            [33, [-0.0513, 0.0289, 1.3799, 0.2166, -0.2573, -0.0871, 0.5103, -0.0999]],
            [47, [-0.4128, -0.1320, -0.3704, 0.1965, -0.4116, -0.2332, -0.3340, 0.2247]],
            # fmt: on
        ]
    )
    @require_torch_gpu
    def test_stable_diffusion_fp16(self, seed, expected_slice):
        model = self.get_sd_vae_model(fp16=True)
        image = self.get_sd_image(seed, fp16=True)
        generator = self.get_generator(seed)
        with torch.no_grad():
            sample = model(image, generator=generator, sample_posterior=True).sample
        assert sample.shape == image.shape
        output_slice = sample[-1, -2:, :2, -2:].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice)
        assert torch_all_close(output_slice, expected_output_slice, atol=1e-2)
@parameterized.expand(
[
# fmt: off
[33, [-0.1609, 0.9866, -0.0487, -0.0777, -0.2716, 0.8368, -0.2055, -0.0814], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]],
[47, [-0.2377, 0.1147, 0.1333, -0.4841, -0.2506, -0.0805, -0.0491, -0.4085], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]],
# fmt: on
] )
def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Tuple:
_lowerCAmelCase = self.get_sd_vae_model()
_lowerCAmelCase = self.get_sd_image(_lowerCAmelCase )
with torch.no_grad():
_lowerCAmelCase = model(_lowerCAmelCase ).sample
assert sample.shape == image.shape
_lowerCAmelCase = sample[-1, -2:, -2:, :2].flatten().float().cpu()
_lowerCAmelCase = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice )
assert torch_all_close(_lowerCAmelCase , _lowerCAmelCase , atol=3E-3 )
@parameterized.expand(
[
# fmt: off
[13, [-0.2051, -0.1803, -0.2311, -0.2114, -0.3292, -0.3574, -0.2953, -0.3323]],
[37, [-0.2632, -0.2625, -0.2199, -0.2741, -0.4539, -0.4990, -0.3720, -0.4925]],
# fmt: on
] )
@require_torch_gpu
    def test_stable_diffusion_decode(self, seed, expected_slice):
        model = self.get_sd_vae_model()
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64))
        with torch.no_grad():
            sample = model.decode(encoding).sample
        assert list(sample.shape) == [3, 3, 512, 512]
        output_slice = sample[-1, -2:, :2, -2:].flatten().cpu()
        expected_output_slice = torch.tensor(expected_slice)
        assert torch_all_close(output_slice, expected_output_slice, atol=1e-3)
@parameterized.expand(
[
# fmt: off
[27, [-0.0369, 0.0207, -0.0776, -0.0682, -0.1747, -0.1930, -0.1465, -0.2039]],
[16, [-0.1628, -0.2134, -0.2747, -0.2642, -0.3774, -0.4404, -0.3687, -0.4277]],
# fmt: on
] )
@require_torch_gpu
    def test_stable_diffusion_decode_fp16(self, seed, expected_slice):
        model = self.get_sd_vae_model(fp16=True)
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64), fp16=True)
        with torch.no_grad():
            sample = model.decode(encoding).sample
        assert list(sample.shape) == [3, 3, 512, 512]
        output_slice = sample[-1, -2:, :2, -2:].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice)
        assert torch_all_close(output_slice, expected_output_slice, atol=5e-3)
@parameterized.expand([(13,), (16,), (27,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason="xformers is not required when using PyTorch 2.0." )
    def test_stable_diffusion_decode_xformers_vs_2_0_fp16(self, seed):
        model = self.get_sd_vae_model(fp16=True)
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64), fp16=True)
        with torch.no_grad():
            sample = model.decode(encoding).sample
        model.enable_xformers_memory_efficient_attention()
        with torch.no_grad():
            sample_xformers = model.decode(encoding).sample
        assert list(sample.shape) == [3, 3, 512, 512]
        assert torch_all_close(sample, sample_xformers, atol=1e-1)
@parameterized.expand([(13,), (16,), (37,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason="xformers is not required when using PyTorch 2.0." )
    def test_stable_diffusion_decode_xformers_vs_2_0(self, seed):
        model = self.get_sd_vae_model()
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64))
        with torch.no_grad():
            sample = model.decode(encoding).sample
        model.enable_xformers_memory_efficient_attention()
        with torch.no_grad():
            sample_xformers = model.decode(encoding).sample
        assert list(sample.shape) == [3, 3, 512, 512]
        assert torch_all_close(sample, sample_xformers, atol=1e-2)
@parameterized.expand(
[
# fmt: off
[33, [-0.3001, 0.0918, -2.6984, -3.9720, -3.2099, -5.0353, 1.7338, -0.2065, 3.4267]],
[47, [-1.5030, -4.3871, -6.0355, -9.1157, -1.6661, -2.7853, 2.1607, -5.0823, 2.5633]],
# fmt: on
] )
    def test_stable_diffusion_encode_sample(self, seed, expected_slice):
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed)
        generator = self.get_generator(seed)
        with torch.no_grad():
            dist = model.encode(image).latent_dist
            sample = dist.sample(generator=generator)
        assert list(sample.shape) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]]
        output_slice = sample[0, -1, -3:, -3:].flatten().cpu()
        expected_output_slice = torch.tensor(expected_slice)
        tolerance = 3e-3 if torch_device != "mps" else 1e-2
        assert torch_all_close(output_slice, expected_output_slice, atol=tolerance)
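# ---------------------------------------------------------------------------
# Hedged usage sketch (an addition, not part of the original test suite): the
# encode/decode round trip the integration tests above exercise, on a random
# tensor so it runs without the load_hf_numpy fixtures. It assumes torch and
# AutoencoderKL are imported at the top of this module and downloads the
# CompVis/stable-diffusion-v1-4 VAE weights on first run.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    vae = AutoencoderKL.from_pretrained("CompVis/stable-diffusion-v1-4", subfolder="vae").eval()
    image = torch.randn(1, 3, 512, 512)  # stand-in for the numpy image fixtures
    with torch.no_grad():
        latents = vae.encode(image).latent_dist.sample(generator=torch.manual_seed(0))
        reconstruction = vae.decode(latents).sample
    # The VAE downsamples by a factor of 8 into 4 latent channels and decodes back.
    assert latents.shape == (1, 4, 64, 64)
    assert reconstruction.shape == image.shape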
| 18 | 0 |
import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
    from transformers import (
        MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
        MODEL_FOR_QUESTION_ANSWERING_MAPPING,
        MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
        MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
        LayoutLMv3Config,
        LayoutLMv3ForQuestionAnswering,
        LayoutLMv3ForSequenceClassification,
        LayoutLMv3ForTokenClassification,
        LayoutLMv3Model,
    )
    from transformers.models.layoutlmv3.modeling_layoutlmv3 import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST

if is_vision_available():
    from PIL import Image

    from transformers import LayoutLMv3ImageProcessor
class LayoutLMv3ModelTester:
    def __init__(self, parent, batch_size=2, num_channels=3, image_size=4, patch_size=2, text_seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=36, num_hidden_layers=3, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, coordinate_size=6, shape_size=6, num_labels=3, num_choices=4, scope=None, range_bbox=1000):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.text_seq_length = text_seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox
        # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
        self.text_seq_length = text_seq_length
        self.image_seq_length = (image_size // patch_size) ** 2 + 1
        self.seq_length = self.text_seq_length + self.image_seq_length
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.text_seq_length], self.vocab_size)
        bbox = ids_tensor([self.batch_size, self.text_seq_length, 4], self.range_bbox)
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.text_seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.text_seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.text_seq_length], self.num_labels)
        config = LayoutLMv3Config(vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, coordinate_size=self.coordinate_size, shape_size=self.shape_size, input_size=self.image_size, patch_size=self.patch_size)
        return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    def create_and_check_model(self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels):
        model = LayoutLMv3Model(config=config)
        model.to(torch_device)
        model.eval()
        # text + image
        result = model(input_ids, pixel_values=pixel_values)
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        # text only
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.text_seq_length, self.hidden_size))
        # image only
        result = model(pixel_values=pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.image_seq_length, self.hidden_size))
    def create_and_check_for_sequence_classification(self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels):
        config.num_labels = self.num_labels
        model = LayoutLMv3ForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels):
        config.num_labels = self.num_labels
        model = LayoutLMv3ForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.text_seq_length, self.num_labels))

    def create_and_check_for_question_answering(self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels):
        model = LayoutLMv3ForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "pixel_values": pixel_values,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_torch
class LayoutLMv3ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    test_pruning = False
    test_torchscript = False
    test_mismatched_shapes = False
    all_model_classes = (
        (
            LayoutLMv3Model,
            LayoutLMv3ForSequenceClassification,
            LayoutLMv3ForTokenClassification,
            LayoutLMv3ForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"document-question-answering": LayoutLMv3ForQuestionAnswering, "feature-extraction": LayoutLMv3Model}
        if is_torch_available()
        else {}
    )

    def is_pipeline_test_to_skip(self, pipeline_test_case_name, config_class, model_architecture, tokenizer_name, processor_name):
        return True

    def setUp(self):
        self.model_tester = LayoutLMv3ModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LayoutLMv3Config, hidden_size=37)
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = copy.deepcopy(inputs_dict)
        if model_class in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
            inputs_dict = {
                k: v.unsqueeze(1).expand(-1, self.model_tester.num_choices, -1).contiguous()
                if isinstance(v, torch.Tensor) and v.ndim > 1
                else v
                for k, v in inputs_dict.items()
            }
        if return_labels:
            if model_class in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
                inputs_dict["labels"] = torch.ones(self.model_tester.batch_size, dtype=torch.long, device=torch_device)
            elif model_class in get_values(MODEL_FOR_QUESTION_ANSWERING_MAPPING):
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
            elif model_class in [
                *get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING),
            ]:
                inputs_dict["labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
            elif model_class in [
                *get_values(MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING),
            ]:
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length), dtype=torch.long, device=torch_device
                )
        return inputs_dict
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LayoutLMv3Model.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
class LayoutLMv3ModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return LayoutLMv3ImageProcessor(apply_ocr=False) if is_vision_available() else None

    @slow
    def test_inference_no_head(self):
        model = LayoutLMv3Model.from_pretrained("microsoft/layoutlmv3-base").to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="pt").pixel_values.to(torch_device)
        input_ids = torch.tensor([[1, 2]])
        bbox = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]]).unsqueeze(0)
        # forward pass
        outputs = model(input_ids=input_ids.to(torch_device), bbox=bbox.to(torch_device), pixel_values=pixel_values.to(torch_device))
        # verify the logits
        expected_shape = torch.Size((1, 199, 768))
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)
        expected_slice = torch.tensor(
            [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))
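# ---------------------------------------------------------------------------
# Hedged usage sketch (an addition): a standalone forward pass mirroring the
# integration test above, with a synthetic image instead of the COCO fixture.
# Requires network access to download the checkpoint.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    import numpy as np

    model = LayoutLMv3Model.from_pretrained("microsoft/layoutlmv3-base")
    image_processor = LayoutLMv3ImageProcessor(apply_ocr=False)
    image = Image.fromarray(np.random.randint(0, 255, (224, 224, 3), dtype=np.uint8))
    pixel_values = image_processor(images=image, return_tensors="pt").pixel_values
    input_ids = torch.tensor([[1, 2]])
    bbox = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]]).unsqueeze(0)  # one box per text token
    outputs = model(input_ids=input_ids, bbox=bbox, pixel_values=pixel_values)
    # 2 text tokens + 196 image patches + 1 patch CLS token = 199 positions
    assert outputs.last_hidden_state.shape == (1, 199, 768)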
| 148 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "bigcode/gpt_bigcode-santacoder": "https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json",
}
class GPTBigCodeConfig(PretrainedConfig):
    model_type = "gpt_bigcode"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(self, vocab_size=50257, n_positions=1024, n_embd=768, n_layer=12, n_head=12, n_inner=None, activation_function="gelu_pytorch_tanh", resid_pdrop=0.1, embd_pdrop=0.1, attn_pdrop=0.1, layer_norm_epsilon=1e-5, initializer_range=0.02, scale_attn_weights=True, use_cache=True, bos_token_id=50256, eos_token_id=50256, attention_softmax_in_fp32=True, scale_attention_softmax_in_fp32=True, multi_query=True, **kwargs):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.attention_softmax_in_fp32 = attention_softmax_in_fp32
        self.scale_attention_softmax_in_fp32 = scale_attention_softmax_in_fp32
        self.multi_query = multi_query
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
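# ---------------------------------------------------------------------------
# Hedged usage sketch (an addition): instantiating the config and reading the
# canonical names through attribute_map, which aliases hidden_size,
# num_hidden_layers and num_attention_heads onto the GPT-2 style n_* fields.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    config = GPTBigCodeConfig(n_layer=24, n_head=16, n_embd=2048)
    assert config.hidden_size == 2048
    assert config.num_hidden_layers == 24
    assert config.num_attention_heads == 16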
| 18 | 0 |
'''simple docstring'''
import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel
class MCLIPConfig(XLMRobertaConfig):
    model_type = "M-CLIP"

    def __init__(self, transformerDimSize=1024, imageDimSize=768, **kwargs):
        self.transformerDimensions = transformerDimSize
        self.numDims = imageDimSize
        super().__init__(**kwargs)
class MultilingualCLIP(PreTrainedModel):
    config_class = MCLIPConfig

    def __init__(self, config, *args, **kwargs):
        super().__init__(config, *args, **kwargs)
        self.transformer = XLMRobertaModel(config)
        self.LinearTransformation = torch.nn.Linear(in_features=config.transformerDimensions, out_features=config.numDims)

    def forward(self, input_ids, attention_mask):
        # Mean-pool token embeddings over the attention mask, then project.
        embs = self.transformer(input_ids=input_ids, attention_mask=attention_mask)[0]
        embs = (embs * attention_mask.unsqueeze(2)).sum(dim=1) / attention_mask.sum(dim=1)[:, None]
        return self.LinearTransformation(embs), embs
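# ---------------------------------------------------------------------------
# Hedged usage sketch (an addition): pooling text features through the wrapper
# above with randomly initialized weights. The xlm-roberta-base tokenizer and
# the dimension choices here are assumptions for illustration; a real M-CLIP
# checkpoint would instead be loaded with MultilingualCLIP.from_pretrained.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("xlm-roberta-base")
    model = MultilingualCLIP(MCLIPConfig(transformerDimSize=768, imageDimSize=640))
    batch = tokenizer(["a photo of a cat"], return_tensors="pt")
    projected, pooled = model(batch["input_ids"], batch["attention_mask"])
    assert pooled.shape == (1, 768) and projected.shape == (1, 640)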
| 50 |
'''simple docstring'''
import math
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/data2vec-base-960h": "https://huggingface.co/facebook/data2vec-audio-base-960h/resolve/main/config.json",
    # See all Data2VecAudio models at https://huggingface.co/models?filter=data2vec-audio
}
class Data2VecAudioConfig(PretrainedConfig):
    model_type = "data2vec-audio"

    def __init__(self, vocab_size=32, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout=0.1, activation_dropout=0.1, attention_dropout=0.1, feat_proj_dropout=0.0, final_dropout=0.1, layerdrop=0.1, initializer_range=0.02, layer_norm_eps=1e-5, feat_extract_activation="gelu", conv_dim=(512, 512, 512, 512, 512, 512, 512), conv_stride=(5, 2, 2, 2, 2, 2, 2), conv_kernel=(10, 3, 3, 3, 3, 2, 2), conv_bias=False, num_conv_pos_embedding_groups=16, conv_pos_kernel_size=19, num_conv_pos_embeddings=5, mask_time_prob=0.05, mask_time_length=10, mask_time_min_masks=2, mask_feature_prob=0.0, mask_feature_length=10, mask_feature_min_masks=0, ctc_loss_reduction="sum", ctc_zero_infinity=False, use_weighted_layer_sum=False, classifier_proj_size=256, tdnn_dim=(512, 512, 512, 512, 1500), tdnn_kernel=(5, 3, 3, 1, 1), tdnn_dilation=(1, 2, 3, 1, 1), xvector_output_dim=512, pad_token_id=0, bos_token_id=1, eos_token_id=2, add_adapter=False, adapter_kernel_size=3, adapter_stride=2, num_adapter_layers=3, output_hidden_size=None, **kwargs):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.conv_pos_kernel_size = conv_pos_kernel_size
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.use_weighted_layer_sum = use_weighted_layer_sum
        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity
        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size
        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def inputs_to_logits_ratio(self):
        return math.prod(self.conv_stride)
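# ---------------------------------------------------------------------------
# Hedged usage sketch (an addition): inputs_to_logits_ratio above is just the
# product of the conv strides, i.e. how many raw waveform samples collapse
# into one encoder frame.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    config = Data2VecAudioConfig()
    assert config.inputs_to_logits_ratio == 320  # 5 * 2**6 from (5, 2, 2, 2, 2, 2, 2)
    assert config.num_feat_extract_layers == 7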
| 18 | 0 |
"""simple docstring"""
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import numpy as np
import pandas as pd
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
BartForSequenceClassification,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
TapexTokenizer,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.17.0.dev0')
require_version('datasets>=1.8.0', 'To fix: pip install -r examples/pytorch/text-classification/requirements.txt')
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    dataset_name: Optional[str] = field(
        default="tab_fact", metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default="tab_fact", metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    max_seq_length: int = field(
        default=1024,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    max_predict_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of prediction examples to this "
                "value if set."
            )
        },
    )
    train_file: Optional[str] = field(
        default=None, metadata={"help": "A csv or a json file containing the training data."}
    )
    validation_file: Optional[str] = field(
        default=None, metadata={"help": "A csv or a json file containing the validation data."}
    )
    test_file: Optional[str] = field(default=None, metadata={"help": "A csv or a json file containing the test data."})

    def __post_init__(self):
        if self.dataset_name is not None:
            pass
        elif self.train_file is None or self.validation_file is None:
            raise ValueError("Need either a GLUE task, a training/validation file or a dataset name.")
        else:
            train_extension = self.train_file.split(".")[-1]
            assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file."
            validation_extension = self.validation_file.split(".")[-1]
            assert (
                validation_extension == train_extension
            ), "`validation_file` should have the same extension (csv or json) as `train_file`."
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        default=None, metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}
    )
    use_fast_tokenizer: bool = field(
        default=True, metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."}
    )
    model_revision: str = field(
        default="main", metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."}
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
logger.info(f'''Training/evaluation parameters {training_args}''' )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
# or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
#
# For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table.
#
# If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
# single column. You can easily tweak this behavior (see below)
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
    if data_args.dataset_name is not None:
        # Downloading and loading a dataset from the hub.
        raw_datasets = load_dataset(
            data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir
        )
    else:
        # Loading a dataset from your local files.
        # CSV/JSON training and evaluation files are needed.
        data_files = {"train": data_args.train_file, "validation": data_args.validation_file}
        # Get the test dataset: you can provide your own CSV/JSON test file (see below)
        # when you use `do_predict` without specifying a GLUE benchmark task.
        if training_args.do_predict:
            if data_args.test_file is not None:
                train_extension = data_args.train_file.split(".")[-1]
                test_extension = data_args.test_file.split(".")[-1]
                assert (
                    test_extension == train_extension
                ), "`test_file` should have the same extension (csv or json) as `train_file`."
                data_files["test"] = data_args.test_file
            else:
                raise ValueError("Need either a GLUE task or a test file for `do_predict`.")
        for key in data_files.keys():
            logger.info(f"load a local file for {key}: {data_files[key]}")
        if data_args.train_file.endswith(".csv"):
            # Loading a dataset from local csv files
            raw_datasets = load_dataset("csv", data_files=data_files, cache_dir=model_args.cache_dir)
        else:
            # Loading a dataset from local json files
            raw_datasets = load_dataset("json", data_files=data_files, cache_dir=model_args.cache_dir)
# See more about loading any type of standard or custom dataset at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Labels
    label_list = raw_datasets["train"].features["label"].names
    num_labels = len(label_list)
    # Load pretrained model and tokenizer
    #
    # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    # load tapex tokenizer
    tokenizer = TapexTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast_tokenizer,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
        add_prefix_space=True,
    )
    model = BartForSequenceClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
# Padding strategy
    if data_args.pad_to_max_length:
        padding = "max_length"
    else:
        # We will pad later, dynamically at batch creation, to the max sequence length in each batch
        padding = False
    # Some models have set the order of the labels to use, so let's make sure we do use it.
    label_to_id = {"Refused": 0, "Entailed": 1}
    id_to_label = {0: "Refused", 1: "Entailed"}
    if data_args.max_seq_length > tokenizer.model_max_length:
        logger.warning(
            f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"
            f" model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
        )
    max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)
    def preprocess_tabfact_function(examples):
        # Tokenize the texts
        def _convert_table_text_to_pandas(_table_text):
            _table_content = [_table_row.split("#") for _table_row in _table_text.strip("\n").split("\n")]
            _table_pd = pd.DataFrame.from_records(_table_content[1:], columns=_table_content[0])
            return _table_pd

        questions = examples["statement"]
        tables = list(map(_convert_table_text_to_pandas, examples["table_text"]))
        result = tokenizer(tables, questions, padding=padding, max_length=max_seq_length, truncation=True)
        result["label"] = examples["label"]
        return result
    with training_args.main_process_first(desc="dataset map pre-processing"):
        raw_datasets = raw_datasets.map(
            preprocess_tabfact_function,
            batched=True,
            load_from_cache_file=not data_args.overwrite_cache,
            desc="Running tokenizer on dataset",
        )
    if training_args.do_train:
        if "train" not in raw_datasets:
            raise ValueError("--do_train requires a train dataset")
        train_dataset = raw_datasets["train"]
        if data_args.max_train_samples is not None:
            train_dataset = train_dataset.select(range(data_args.max_train_samples))
    if training_args.do_eval:
        if "validation" not in raw_datasets and "validation_matched" not in raw_datasets:
            raise ValueError("--do_eval requires a validation dataset")
        eval_dataset = raw_datasets["validation"]
        if data_args.max_eval_samples is not None:
            eval_dataset = eval_dataset.select(range(data_args.max_eval_samples))
    if training_args.do_predict or data_args.test_file is not None:
        if "test" not in raw_datasets and "test_matched" not in raw_datasets:
            raise ValueError("--do_predict requires a test dataset")
        predict_dataset = raw_datasets["test"]
        if data_args.max_predict_samples is not None:
            predict_dataset = predict_dataset.select(range(data_args.max_predict_samples))
    # Log a few random samples from the training set:
    if training_args.do_train:
        for index in random.sample(range(len(train_dataset)), 3):
            logger.info(f"Sample {index} of the training set: {train_dataset[index]}.")
    # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
    # predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p: EvalPrediction):
        preds = p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions
        preds = np.argmax(preds, axis=1)
        return {"accuracy": (preds == p.label_ids).astype(np.float32).mean().item()}

    # Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
    if data_args.pad_to_max_length:
        data_collator = default_data_collator
    elif training_args.fp16:
        data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8)
    else:
        data_collator = None
    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset if training_args.do_train else None,
        eval_dataset=eval_dataset if training_args.do_eval else None,
        compute_metrics=compute_metrics,
        tokenizer=tokenizer,
        data_collator=data_collator,
    )
    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))
        trainer.save_model()  # Saves the tokenizer too for easy upload
        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        metrics = trainer.evaluate(eval_dataset=eval_dataset)
        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    if training_args.do_predict:
        logger.info("*** Predict ***")
        # Removing the `label` columns because it contains -1 and Trainer won't like that.
        predict_dataset = predict_dataset.remove_columns("label")
        predictions = trainer.predict(predict_dataset, metric_key_prefix="predict").predictions
        predictions = np.argmax(predictions, axis=1)
        output_predict_file = os.path.join(training_args.output_dir, "predict_results_tabfact.txt")
        if trainer.is_world_process_zero():
            with open(output_predict_file, "w") as writer:
                logger.info("***** Predict Results *****")
                writer.write("index\tprediction\n")
                for index, item in enumerate(predictions):
                    item = label_list[item]
                    writer.write(f"{index}\t{item}\n")

    kwargs = {"finetuned_from": model_args.model_name_or_path, "tasks": "text-classification"}
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
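# ---------------------------------------------------------------------------
# Hedged illustration (an addition, not called by the script): what the inner
# _convert_table_text_to_pandas helper above does to the "#"-delimited TabFact
# table encoding, on a toy two-row table.
# ---------------------------------------------------------------------------
def _demo_convert_table_text():
    table_text = "city#population\nparis#2.1m\nberlin#3.6m\n"
    content = [row.split("#") for row in table_text.strip("\n").split("\n")]
    # First row becomes the column header, remaining rows become records.
    return pd.DataFrame.from_records(content[1:], columns=content[0])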
| 353 |
'''simple docstring'''
import torch
from diffusers import DDPMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class lowerCAmelCase_ ( __magic_name__ ):
__lowerCamelCase : Any = (DDPMParallelScheduler,)
def _snake_case ( self , **_lowerCAmelCase ) -> int:
_lowerCAmelCase = {
"num_train_timesteps": 1000,
"beta_start": 0.0001,
"beta_end": 0.02,
"beta_schedule": "linear",
"variance_type": "fixed_small",
"clip_sample": True,
}
config.update(**_lowerCAmelCase )
return config
def _snake_case ( self ) -> List[Any]:
for timesteps in [1, 5, 100, 1000]:
self.check_over_configs(num_train_timesteps=_lowerCAmelCase )
def _snake_case ( self ) -> List[Any]:
for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=_lowerCAmelCase , beta_end=_lowerCAmelCase )
def _snake_case ( self ) -> Any:
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=_lowerCAmelCase )
def _snake_case ( self ) -> Optional[Any]:
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=_lowerCAmelCase )
def _snake_case ( self ) -> Optional[int]:
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=_lowerCAmelCase )
    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(thresholding=True, prediction_type=prediction_type, sample_max_value=threshold)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [0, 500, 999]:
            self.check_over_forward(time_step=t)
    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.02)) < 1e-5

    def test_batch_step_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        num_trained_timesteps = len(scheduler)
        model = self.dummy_model()
        sample1 = self.dummy_sample_deter
        sample2 = self.dummy_sample_deter + 0.1
        sample3 = self.dummy_sample_deter - 0.1
        per_sample_batch = sample1.shape[0]
        samples = torch.stack([sample1, sample2, sample3], dim=0)
        timesteps = torch.arange(num_trained_timesteps)[0:3, None].repeat(1, per_sample_batch)
        residual = model(samples.flatten(0, 1), timesteps.flatten(0, 1))
        pred_prev_sample = scheduler.batch_step_no_noise(residual, timesteps.flatten(0, 1), samples.flatten(0, 1))
        result_sum = torch.sum(torch.abs(pred_prev_sample))
        result_mean = torch.mean(torch.abs(pred_prev_sample))
        assert abs(result_sum.item() - 1153.1833) < 1e-2
        assert abs(result_mean.item() - 0.5005) < 1e-3
    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        num_trained_timesteps = len(scheduler)
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)
        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 258.9606) < 1e-2
        assert abs(result_mean.item() - 0.3372) < 1e-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)
        num_trained_timesteps = len(scheduler)
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)
        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 202.0296) < 1e-2
        assert abs(result_mean.item() - 0.2631) < 1e-3
    def test_custom_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [100, 87, 50, 1, 0]
        scheduler.set_timesteps(timesteps=timesteps)
        scheduler_timesteps = scheduler.timesteps
        for i, timestep in enumerate(scheduler_timesteps):
            if i == len(scheduler_timesteps) - 1:
                expected_prev_t = -1
            else:
                expected_prev_t = timesteps[i + 1]
            prev_t = scheduler.previous_timestep(timestep)
            prev_t = prev_t.item()
            self.assertEqual(prev_t, expected_prev_t)

    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [100, 87, 50, 51, 0]
        with self.assertRaises(ValueError, msg="`custom_timesteps` must be in descending order."):
            scheduler.set_timesteps(timesteps=timesteps)

    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [100, 87, 50, 1, 0]
        num_inference_steps = len(timesteps)
        with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `custom_timesteps`."):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)

    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [scheduler.config.num_train_timesteps]
        with self.assertRaises(
            ValueError,
            msg=f"`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}",
        ):
            scheduler.set_timesteps(timesteps=timesteps)
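# ---------------------------------------------------------------------------
# Hedged usage sketch (an addition): a bare denoising loop with
# DDPMParallelScheduler outside the test harness, with a zero tensor standing
# in for a trained noise-prediction model.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    scheduler = DDPMParallelScheduler(num_train_timesteps=1000)
    scheduler.set_timesteps(num_inference_steps=10)
    sample = torch.randn(1, 3, 8, 8)
    generator = torch.manual_seed(0)
    for t in scheduler.timesteps:
        model_output = torch.zeros_like(sample)  # stand-in for a UNet's noise prediction
        sample = scheduler.step(model_output, t, sample, generator=generator).prev_sample
    print(sample.abs().mean())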
| 18 | 0 |
"""simple docstring"""
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class BeitImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_center_crop=True, crop_size=None, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], do_reduce_labels=False):
        size = size if size is not None else {"height": 20, "width": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_reduce_labels = do_reduce_labels

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_reduce_labels": self.do_reduce_labels,
        }
def prepare_semantic_single_inputs():
    dataset = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")
    image = Image.open(dataset[0]["file"])
    map = Image.open(dataset[1]["file"])
    return image, map


def prepare_semantic_batch_inputs():
    ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")
    image1 = Image.open(ds[0]["file"])
    map1 = Image.open(ds[1]["file"])
    image2 = Image.open(ds[2]["file"])
    map2 = Image.open(ds[3]["file"])
    return [image1, image2], [map1, map2]
@require_torch
@require_vision
class BeitImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = BeitImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = BeitImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 20, "width": 20})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})
        self.assertEqual(image_processor.do_reduce_labels, False)
        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, crop_size=84, reduce_labels=True
        )
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})
        self.assertEqual(image_processor.do_reduce_labels, True)

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_segmentation_maps(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        maps = []
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
            maps.append(torch.zeros(image.shape[-2:]).long())
        # Test not batched input
        encoding = image_processing(image_inputs[0], maps[0], return_tensors="pt")
        self.assertEqual(
            encoding["pixel_values"].shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(
            encoding["labels"].shape,
            (
                1,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(encoding["labels"].dtype, torch.long)
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)
        # Test batched
        encoding = image_processing(image_inputs, maps, return_tensors="pt")
        self.assertEqual(
            encoding["pixel_values"].shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(
            encoding["labels"].shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(encoding["labels"].dtype, torch.long)
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)
        # Test not batched input (PIL images)
        image, segmentation_map = prepare_semantic_single_inputs()
        encoding = image_processing(image, segmentation_map, return_tensors="pt")
        self.assertEqual(
            encoding["pixel_values"].shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(
            encoding["labels"].shape,
            (
                1,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(encoding["labels"].dtype, torch.long)
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)
        # Test batched input (PIL images)
        images, segmentation_maps = prepare_semantic_batch_inputs()
        encoding = image_processing(images, segmentation_maps, return_tensors="pt")
        self.assertEqual(
            encoding["pixel_values"].shape,
            (
                2,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(
            encoding["labels"].shape,
            (
                2,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(encoding["labels"].dtype, torch.long)
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)
def test_reduce_labels( self ):
    # Initialize image_processing
    image_processing = self.image_processing_class(**self.image_processor_dict )
    # ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150
    image , map = prepare_semantic_single_inputs()
    encoding = image_processing(image , map , return_tensors="pt" )
    self.assertTrue(encoding["labels"].min().item() >= 0 )
    self.assertTrue(encoding["labels"].max().item() <= 150 )
    image_processing.do_reduce_labels = True
    encoding = image_processing(image , map , return_tensors="pt" )
    self.assertTrue(encoding["labels"].min().item() >= 0 )
    self.assertTrue(encoding["labels"].max().item() <= 255 )
| 528 |
'''simple docstring'''
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "TsinghuaAI/CPM-Generate": "https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model",
    }
}
class CpmTokenizer( PreTrainedTokenizer ):
    def __init__( self , vocab_file , do_lower_case=False , remove_space=True , keep_accents=False , bos_token="<s>" , eos_token="</s>" , unk_token="<unk>" , sep_token="<sep>" , pad_token="<pad>" , cls_token="<cls>" , mask_token="<mask>" , additional_special_tokens=["<eop>", "<eod>"] , sp_model_kwargs = None , **kwargs , ) -> None:
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            do_lower_case=do_lower_case , remove_space=remove_space , keep_accents=keep_accents , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , additional_special_tokens=additional_special_tokens , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(vocab_file )
        try:
            import jieba
        except ModuleNotFoundError as error:
            raise error.__class__(
                "You need to install jieba to use CpmTokenizer or CpmTokenizerFast. "
                "See https://pypi.org/project/jieba/ for installation." )
        self.jieba = jieba
        self.translator = str.maketrans(" \n" , "\u2582\u2583" )
    @property
    # Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size
    def vocab_size( self ) -> int:
        return len(self.sp_model )
    def get_vocab( self ) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def __getstate__( self ) -> Dict:
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state
    def __setstate__( self , d ) -> None:
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , "sp_model_kwargs" ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )
    def preprocess_text( self , inputs ) -> str:
        if self.remove_space:
            outputs = " ".join(inputs.strip().split() )
        else:
            outputs = inputs
        outputs = outputs.replace("``" , "\"" ).replace("''" , "\"" )
        if not self.keep_accents:
            outputs = unicodedata.normalize("NFKD" , outputs )
            outputs = "".join([c for c in outputs if not unicodedata.combining(c )] )
        if self.do_lower_case:
            outputs = outputs.lower()
        return outputs
    def _tokenize( self , text ) -> List[str]:
        text = self.preprocess_text(text )
        pieces = self.sp_model.encode(text , out_type=str )
        new_pieces = []
        for piece in pieces:
            if len(piece ) > 1 and piece[-1] == str("," ) and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE , "" ) )
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0] ) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1] )
                new_pieces.extend(cur_pieces )
            else:
                new_pieces.append(piece )
        return new_pieces
    def _convert_token_to_id( self , token ) -> int:
        return self.sp_model.PieceToId(token )
    def _convert_id_to_token( self , index ) -> str:
        return self.sp_model.IdToPiece(index )
    def convert_tokens_to_string( self , tokens ) -> str:
        out_string = "".join(tokens ).replace(SPIECE_UNDERLINE , " " ).strip()
        return out_string
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1 = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls
    def get_special_tokens_mask( self , token_ids_0 , token_ids_1 = None , already_has_special_tokens = False ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
        if token_ids_1 is not None:
            return ([0] * len(token_ids_0 )) + [1] + ([0] * len(token_ids_1 )) + [1, 1]
        return ([0] * len(token_ids_0 )) + [1, 1]
    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls_segment_id = [2]
        if token_ids_1 is None:
            return len(token_ids_0 + sep ) * [0] + cls_segment_id
        return len(token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1] + cls_segment_id
    def save_vocabulary( self , save_directory , filename_prefix = None ) -> Tuple[str]:
        if not os.path.isdir(save_directory ):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , "wb" ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)
    def _decode( self , *args , **kwargs ) -> str:
        text = super()._decode(*args , **kwargs )
        text = text.replace(" " , "" ).replace("\u2582" , " " ).replace("\u2583" , "\n" )
        return text
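# --- illustrative usage sketch (assumes the class above ships as `transformers.CpmTokenizer`
# and that the `TsinghuaAI/CPM-Generate` checkpoint plus `jieba` are installed) ---
from transformers import CpmTokenizer

tokenizer = CpmTokenizer.from_pretrained("TsinghuaAI/CPM-Generate")
ids = tokenizer.encode("你好,世界")  # jieba word segmentation feeds SentencePiece
print(tokenizer.decode(ids))         # `_decode` restores spaces/newlines from \u2582/\u2583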
| 18 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    'configuration_deberta': ['DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'DebertaConfig', 'DebertaOnnxConfig'],
    'tokenization_deberta': ['DebertaTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_deberta_fast'] = ['DebertaTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_deberta'] = [
'DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'DebertaForMaskedLM',
'DebertaForQuestionAnswering',
'DebertaForSequenceClassification',
'DebertaForTokenClassification',
'DebertaModel',
'DebertaPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_deberta'] = [
'TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFDebertaForMaskedLM',
'TFDebertaForQuestionAnswering',
'TFDebertaForSequenceClassification',
'TFDebertaForTokenClassification',
'TFDebertaModel',
'TFDebertaPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig, DebertaOnnxConfig
from .tokenization_deberta import DebertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_deberta_fast import DebertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deberta import (
DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
DebertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deberta import (
TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDebertaForMaskedLM,
TFDebertaForQuestionAnswering,
TFDebertaForSequenceClassification,
TFDebertaForTokenClassification,
TFDebertaModel,
TFDebertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
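# --- illustrative sketch of the lazy-import behaviour configured above ---
# Importing the package is cheap; the heavy torch/tf submodules only load on first
# attribute access, via `_LazyModule.__getattr__`.
import transformers.models.deberta as deberta  # no torch/tf import happens here

config_cls = deberta.DebertaConfig  # configuration_deberta loads on first access
model_cls = deberta.DebertaModel    # modeling_deberta (and torch) load on first access
print(config_cls.__name__, model_cls.__name__)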
| 412 |
'''simple docstring'''
from sklearn.metrics import mean_squared_error
import datasets
_SCREAMING_SNAKE_CASE = "\\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n"
_SCREAMING_SNAKE_CASE = "\\nMean Squared Error(MSE) is the average of the square of difference between the predicted\nand actual values.\n"
_SCREAMING_SNAKE_CASE = "\nArgs:\n predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)\n Estimated target values.\n references: array-like of shape (n_samples,) or (n_samples, n_outputs)\n Ground truth (correct) target values.\n sample_weight: array-like of shape (n_samples,), default=None\n Sample weights.\n multioutput: {\"raw_values\", \"uniform_average\"} or array-like of shape (n_outputs,), default=\"uniform_average\"\n Defines aggregating of multiple output values. Array-like value defines weights used to average errors.\n\n \"raw_values\" : Returns a full set of errors in case of multioutput input.\n\n \"uniform_average\" : Errors of all outputs are averaged with uniform weight.\n\n squared : bool, default=True\n If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.\n\nReturns:\n mse : mean squared error.\nExamples:\n\n >>> mse_metric = datasets.load_metric(\"mse\")\n >>> predictions = [2.5, 0.0, 2, 8]\n >>> references = [3, -0.5, 2, 7]\n >>> results = mse_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'mse': 0.375}\n >>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)\n >>> print(rmse_result)\n {'mse': 0.6123724356957945}\n\n If you're using multi-dimensional lists, then set the config as follows :\n\n >>> mse_metric = datasets.load_metric(\"mse\", \"multilist\")\n >>> predictions = [[0.5, 1], [-1, 1], [7, -6]]\n >>> references = [[0, 2], [-1, 2], [8, -5]]\n >>> results = mse_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'mse': 0.7083333333333334}\n >>> results = mse_metric.compute(predictions=predictions, references=references, multioutput='raw_values')\n >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n {'mse': array([0.41666667, 1. ])}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class Mse( datasets.Metric ):
def _info( self ) -> Dict:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , reference_urls=[
"https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html"
] , )
def _get_feature_types( self ) -> Tuple:
if self.config_name == "multilist":
return {
"predictions": datasets.Sequence(datasets.Value("float" ) ),
"references": datasets.Sequence(datasets.Value("float" ) ),
}
else:
return {
"predictions": datasets.Value("float" ),
"references": datasets.Value("float" ),
}
def _compute( self , predictions , references , sample_weight=None , multioutput="uniform_average" , squared=True ) -> Dict:
    mse = mean_squared_error(
        references , predictions , sample_weight=sample_weight , multioutput=multioutput , squared=squared )
    return {"mse": mse}
| 18 | 0 |
"""simple docstring"""
def search( list_data , key , left = 0 , right = 0 ):
    """simple docstring"""
    right = right or len(list_data ) - 1
    if left > right:
        return -1
    elif list_data[left] == key:
        return left
    elif list_data[right] == key:
        return right
    else:
        return search(list_data , key , left + 1 , right - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
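# --- illustrative usage sketch for the two-ended recursive search above ---
data = [1, 0, 5, 7, 9]
print(search(data, 5))  # 2: found after narrowing from both ends
print(search(data, 9))  # 4: matched immediately at the right end
print(search(data, 4))  # -1: not present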
| 341 |
'''simple docstring'''
def different_signs(numa : int , numb : int ):
    '''simple docstring'''
    return numa ^ numb < 0
if __name__ == "__main__":
import doctest
doctest.testmod()
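# --- illustrative check: XOR of two ints is negative iff exactly one operand is negative ---
print(different_signs(1, -1))   # True
print(different_signs(1, 1))    # False
print(different_signs(-7, -3))  # False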
| 18 | 0 |
from typing import List, Optional, Tuple, Union
import torch
from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
__magic_name__ = logging.get_logger(__name__) # pylint: disable=invalid-name
class DanceDiffusionPipeline( DiffusionPipeline ):
    """simple docstring"""
    def __init__( self , unet , scheduler ):
        super().__init__()
        self.register_modules(unet=unet , scheduler=scheduler )
    @torch.no_grad()
    def __call__( self , batch_size = 1 , num_inference_steps = 100 , generator = None , audio_length_in_s = None , return_dict = True , ):
        if audio_length_in_s is None:
            audio_length_in_s = self.unet.config.sample_size / self.unet.config.sample_rate
        sample_size = audio_length_in_s * self.unet.config.sample_rate
        down_scale_factor = 2 ** len(self.unet.up_blocks )
        if sample_size < 3 * down_scale_factor:
            raise ValueError(
                F"""{audio_length_in_s} is too small. Make sure it's bigger or equal to"""
                F""" {3 * down_scale_factor / self.unet.config.sample_rate}.""" )
        original_sample_size = int(sample_size )
        if sample_size % down_scale_factor != 0:
            sample_size = (
                (audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
            ) * down_scale_factor
            logger.info(
                F"""{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled"""
                F""" by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising"""
                ' process.' )
        sample_size = int(sample_size )
        dtype = next(iter(self.unet.parameters() ) ).dtype
        shape = (batch_size, self.unet.config.in_channels, sample_size)
        if isinstance(generator , list ) and len(generator ) != batch_size:
            raise ValueError(
                F"""You have passed a list of generators of length {len(generator )}, but requested an effective batch"""
                F""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" )
        audio = randn_tensor(shape , generator=generator , device=self.device , dtype=dtype )
        # set step values
        self.scheduler.set_timesteps(num_inference_steps , device=audio.device )
        self.scheduler.timesteps = self.scheduler.timesteps.to(dtype )
        for t in self.progress_bar(self.scheduler.timesteps ):
            # 1. predict noise model_output
            model_output = self.unet(audio , t ).sample
            # 2. compute previous audio sample: x_t -> x_t-1
            audio = self.scheduler.step(model_output , t , audio ).prev_sample
        audio = audio.clamp(-1 , 1 ).float().cpu().numpy()
        audio = audio[:, :, :original_sample_size]
        if not return_dict:
            return (audio,)
        return AudioPipelineOutput(audios=audio )
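# --- illustrative usage sketch (assumes this pipeline is exported as
# `diffusers.DanceDiffusionPipeline`; "harmonai/maestro-150k" is one public checkpoint) ---
import torch
from diffusers import DanceDiffusionPipeline

pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k")
pipe = pipe.to("cuda" if torch.cuda.is_available() else "cpu")
output = pipe(num_inference_steps=100, audio_length_in_s=4.0)
audio = output.audios[0]  # numpy array of shape (channels, samples)
print(audio.shape)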
| 576 |
'''simple docstring'''
from __future__ import annotations
def p_series(nth_term : int | float | str , power : int | float | str ):
    '''simple docstring'''
    if nth_term == "":
        return [""]
    nth_term = int(nth_term )
    power = int(power )
    series = []
    for temp in range(int(nth_term ) ):
        series.append(F'''1 / {pow(temp + 1 , int(power ) )}''' if series else "1" )
    return series
if __name__ == "__main__":
import doctest
doctest.testmod()
_SCREAMING_SNAKE_CASE = int(input("Enter the last number (nth term) of the P-Series"))
_SCREAMING_SNAKE_CASE = int(input("Enter the power for P-Series"))
print("Formula of P-Series => 1+1/2^p+1/3^p ..... 1/n^p")
print(p_series(nth_term, power))
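    # --- illustrative non-interactive example of the function above ---
    print(p_series(5, 2))  # ['1', '1 / 4', '1 / 9', '1 / 16', '1 / 25']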
| 18 | 0 |
'''simple docstring'''
from math import factorial
def solution( n = 20 ):
    """simple docstring"""
    n = 2 * n  # middle entry of odd rows starting at row 3 is the solution for n = 1,
    # 2, 3,...
    k = n // 2
    return int(factorial(n ) / (factorial(k ) * factorial(n - k )) )
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution(20))
else:
try:
UpperCAmelCase_ : Tuple = int(sys.argv[1])
print(solution(n))
except ValueError:
print('Invalid entry - please enter a number.')
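# --- illustrative cross-check: the middle entry of Pascal's triangle row 2n is C(2n, n) ---
from math import comb  # Python 3.8+

assert solution(4) == comb(8, 4) == 70
print(solution(4))  # 70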
| 533 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : int = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Union[str, Any]:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Union[str, Any]:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : int = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Any:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : List[Any] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Any:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[Any]:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : List[Any] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[int]:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[Any]:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[str]:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : Dict = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> int:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[str]:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : List[Any] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[str]:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> str:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[str]:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : int = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Any:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> str:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[Any]:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : Optional[int] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Tuple:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[Any]:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : Union[str, Any] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> int:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[str]:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : Optional[int] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[int]:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[int]:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : Union[str, Any] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> str:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Union[str, Any]:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> int:
requires_backends(cls , ["torch"] )
def __a(*SCREAMING_SNAKE_CASE_ : Optional[Any] , **SCREAMING_SNAKE_CASE_ : int ):
'''simple docstring'''
requires_backends(SCREAMING_SNAKE_CASE_ , ["torch"] )
def __a(*SCREAMING_SNAKE_CASE_ : str , **SCREAMING_SNAKE_CASE_ : str ):
'''simple docstring'''
requires_backends(SCREAMING_SNAKE_CASE_ , ["torch"] )
def __a(*SCREAMING_SNAKE_CASE_ : str , **SCREAMING_SNAKE_CASE_ : List[Any] ):
'''simple docstring'''
requires_backends(SCREAMING_SNAKE_CASE_ , ["torch"] )
def __a(*SCREAMING_SNAKE_CASE_ : Optional[Any] , **SCREAMING_SNAKE_CASE_ : List[str] ):
'''simple docstring'''
requires_backends(SCREAMING_SNAKE_CASE_ , ["torch"] )
def __a(*SCREAMING_SNAKE_CASE_ : List[Any] , **SCREAMING_SNAKE_CASE_ : List[str] ):
'''simple docstring'''
requires_backends(SCREAMING_SNAKE_CASE_ , ["torch"] )
def __a(*SCREAMING_SNAKE_CASE_ : Tuple , **SCREAMING_SNAKE_CASE_ : str ):
'''simple docstring'''
requires_backends(SCREAMING_SNAKE_CASE_ , ["torch"] )
def __a(*SCREAMING_SNAKE_CASE_ : int , **SCREAMING_SNAKE_CASE_ : Dict ):
'''simple docstring'''
requires_backends(SCREAMING_SNAKE_CASE_ , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : Dict = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Dict:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Dict:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : Union[str, Any] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> str:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Tuple:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[Any]:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : Optional[Any] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Tuple:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[Any]:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : List[Any] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Tuple:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Union[str, Any]:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> int:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : List[Any] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> int:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[int]:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : Any = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[int]:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[int]:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[int]:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : Optional[int] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Dict:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[str]:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Tuple:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : List[str] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[Any]:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[str]:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Any:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : Any = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> int:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> int:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Dict:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : List[str] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[int]:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Any:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : List[str] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> int:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : Optional[int] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> str:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Dict:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : Union[str, Any] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Any:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> str:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Dict:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : List[Any] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Any:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Tuple:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Union[str, Any]:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : Optional[int] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> int:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> int:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[str]:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : str = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> int:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[int]:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[Any]:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : Tuple = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> str:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[str]:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : Union[str, Any] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> str:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Union[str, Any]:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Tuple:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : List[str] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[int]:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> str:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : Any = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> str:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[str]:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> int:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : Union[str, Any] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[str]:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Any:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Dict:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : Union[str, Any] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> int:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Any:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> str:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : int = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Union[str, Any]:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Tuple:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : Tuple = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[Any]:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Dict:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> int:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : str = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[Any]:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[int]:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[int]:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : Dict = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> str:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Any:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Tuple:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : Any = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> int:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Tuple:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : List[str] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : int = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Union[str, Any]:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Dict:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : Dict = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Any:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[Any]:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Dict:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : str = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[Any]:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Tuple:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[Any]:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : int = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Union[str, Any]:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[str]:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[Any]:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : List[Any] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[Any]:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[Any]:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Dict:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : List[Any] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Union[str, Any]:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> str:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Union[str, Any]:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : List[str] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[int]:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Union[str, Any]:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : Tuple = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Dict:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Tuple:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : Union[str, Any] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> str:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Any:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Union[str, Any]:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : str = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[int]:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Tuple:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> str:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : Union[str, Any] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Union[str, Any]:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> str:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Dict:
requires_backends(cls , ["torch"] )
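# --- illustrative sketch of how these placeholders behave without PyTorch installed ---
# Instantiating any dummy class above routes through `requires_backends`, which raises
# an ImportError explaining that the real object needs the missing backend.
try:
    lowerCAmelCase_()  # any of the dummy classes defined above
except ImportError as err:
    print(err)  # message says, roughly, that PyTorch is required but was not found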
| 18 | 0 |
"""simple docstring"""
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class CTRLTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = CTRLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False
    def setUp( self ) -> None:
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ["adapt", "re@@", "a@@", "apt", "c@@", "t", "<unk>"]
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        merges = ["#version: 0.2", "a p", "ap t</w>", "r e", "a d", "ad apt</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
        with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
            fp.write(json.dumps(vocab_tokens ) + "\n" )
        with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
            fp.write("\n".join(merges ) )
    def get_tokenizer( self , **kwargs ) -> CTRLTokenizer:
        kwargs.update(self.special_tokens_map )
        return CTRLTokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def get_input_output_texts( self , tokenizer ) -> Tuple[str, str]:
        input_text = "adapt react readapt apt"
        output_text = "adapt react readapt apt"
        return input_text, output_text
    def test_full_tokenizer( self ) -> None:
        tokenizer = CTRLTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
        text = "adapt react readapt apt"
        bpe_tokens = "adapt re@@ a@@ c@@ t re@@ adapt apt".split()
        tokens = tokenizer.tokenize(text )
        self.assertListEqual(tokens , bpe_tokens )
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 4, 5, 1, 0, 3, 6]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , input_bpe_tokens )
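# --- illustrative BPE trace for "readapt" with the toy merges above ---
# r e a d a p t</w>  --"a p"-->  r e a d ap t</w>  --"ap t</w>"-->  r e a d apt
# --"r e"-->  re a d apt  --"a d"-->  re ad apt  --"ad apt</w>"-->  re adapt,
# i.e. the tokens ["re@@", "adapt"] that the expected string "re@@ adapt" encodes.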
| 633 |
'''simple docstring'''
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
def get_job_links(workflow_run_id , token=None ):
    '''simple docstring'''
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": F'''Bearer {token}'''}
    url = F'''https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100'''
    result = requests.get(url , headers=headers ).json()
    job_links = {}
    try:
        job_links.update({job["name"]: job["html_url"] for job in result["jobs"]} )
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100 )
        for i in range(pages_to_iterate_over ):
            result = requests.get(url + F'''&page={i + 2}''' , headers=headers ).json()
            job_links.update({job["name"]: job["html_url"] for job in result["jobs"]} )
        return job_links
    except Exception:
        print(F'''Unknown error, could not fetch links:\n{traceback.format_exc()}''' )
    return {}
def get_artifacts_links(workflow_run_id , token=None ):
    '''simple docstring'''
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": F'''Bearer {token}'''}
    url = F'''https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/artifacts?per_page=100'''
    result = requests.get(url , headers=headers ).json()
    artifacts = {}
    try:
        artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]} )
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100 )
        for i in range(pages_to_iterate_over ):
            result = requests.get(url + F'''&page={i + 2}''' , headers=headers ).json()
            artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]} )
        return artifacts
    except Exception:
        print(F'''Unknown error, could not fetch links:\n{traceback.format_exc()}''' )
    return {}
def download_artifact(artifact_name , artifact_url , output_dir , token ):
    '''simple docstring'''
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": F'''Bearer {token}'''}
    result = requests.get(artifact_url , headers=headers , allow_redirects=False )
    download_url = result.headers["Location"]
    response = requests.get(download_url , allow_redirects=True )
    file_path = os.path.join(output_dir , F'''{artifact_name}.zip''' )
    with open(file_path , "wb" ) as fp:
        fp.write(response.content )
def get_errors_from_single_artifact(artifact_zip_path , job_links=None ):
    '''simple docstring'''
    errors = []
    failed_tests = []
    job_name = None
    with zipfile.ZipFile(artifact_zip_path ) as z:
        for filename in z.namelist():
            if not os.path.isdir(filename ):
                # read the file
                if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
                    with z.open(filename ) as f:
                        for line in f:
                            line = line.decode("UTF-8" ).strip()
                            if filename == "failures_line.txt":
                                try:
                                    # `error_line` is the place where `error` occurs
                                    error_line = line[: line.index(": " )]
                                    error = line[line.index(": " ) + len(": " ) :]
                                    errors.append([error_line, error] )
                                except Exception:
                                    # skip un-related lines
                                    pass
                            elif filename == "summary_short.txt" and line.startswith("FAILED " ):
                                # `test` is the test method that failed
                                test = line[len("FAILED " ) :]
                                failed_tests.append(test )
                            elif filename == "job_name.txt":
                                job_name = line
    if len(errors ) != len(failed_tests ):
        raise ValueError(
            F'''`errors` and `failed_tests` should have the same number of elements. Got {len(errors )} for `errors` '''
            F'''and {len(failed_tests )} for `failed_tests` instead. The test reports in {artifact_zip_path} have some'''
            " problem." )
    job_link = None
    if job_name and job_links:
        job_link = job_links.get(job_name , None )
    # A list with elements of the form (line of error, error, failed test)
    result = [x + [y] + [job_link] for x, y in zip(errors , failed_tests )]
    return result
def get_all_errors(artifact_dir , job_links=None ):
    '''simple docstring'''
    errors = []
    paths = [os.path.join(artifact_dir , p ) for p in os.listdir(artifact_dir ) if p.endswith(".zip" )]
    for p in paths:
        errors.extend(get_errors_from_single_artifact(p , job_links=job_links ) )
    return errors
def reduce_by_error(logs , error_filter=None ):
    '''simple docstring'''
    counter = Counter()
    counter.update([x[1] for x in logs] )
    counts = counter.most_common()
    r = {}
    for error, count in counts:
        if error_filter is None or error not in error_filter:
            r[error] = {"count": count, "failed_tests": [(x[2], x[0]) for x in logs if x[1] == error]}
    r = dict(sorted(r.items() , key=lambda item : item[1]["count"] , reverse=True ) )
    return r
def get_model(test ):
    '''simple docstring'''
    test = test.split("::" )[0]
    if test.startswith("tests/models/" ):
        model = test.split("/" )[2]
    else:
        model = None
    return model
def reduce_by_model(logs , error_filter=None ):
    '''simple docstring'''
    logs = [(x[0], x[1], get_model(x[2] )) for x in logs]
    logs = [x for x in logs if x[2] is not None]
    tests = {x[2] for x in logs}
    r = {}
    for test in tests:
        counter = Counter()
        # count by errors in `test`
        counter.update([x[1] for x in logs if x[2] == test] )
        counts = counter.most_common()
        error_counts = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
        n_errors = sum(error_counts.values() )
        if n_errors > 0:
            r[test] = {"count": n_errors, "errors": error_counts}
    r = dict(sorted(r.items() , key=lambda item : item[1]["count"] , reverse=True ) )
    return r
def make_github_table(reduced_by_error ):
    '''simple docstring'''
    header = "| no. | error | status |"
    sep = "|-:|:-|:-|"
    lines = [header, sep]
    for error in reduced_by_error:
        count = reduced_by_error[error]["count"]
        line = F'''| {count} | {error[:100]} | |'''
        lines.append(line )
    return "\n".join(lines )
def make_github_table_per_model(reduced_by_model ):
    '''simple docstring'''
    header = "| model | no. of errors | major error | count |"
    sep = "|-:|-:|-:|-:|"
    lines = [header, sep]
    for model in reduced_by_model:
        count = reduced_by_model[model]["count"]
        error , _count = list(reduced_by_model[model]["errors"].items() )[0]
        line = F'''| {model} | {count} | {error[:60]} | {_count} |'''
        lines.append(line )
    return "\n".join(lines )
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
parser.add_argument(
"--output_dir",
type=str,
required=True,
help="Where to store the downloaded artifacts and other result files.",
)
parser.add_argument("--token", default=None, type=str, help="A token that has actions:read permission.")
_SCREAMING_SNAKE_CASE = parser.parse_args()
os.makedirs(args.output_dir, exist_ok=True)
_SCREAMING_SNAKE_CASE = get_job_links(args.workflow_run_id, token=args.token)
_SCREAMING_SNAKE_CASE = {}
# To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
# For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
if _job_links:
for k, v in _job_links.items():
# This is how GitHub actions combine job names.
if " / " in k:
_SCREAMING_SNAKE_CASE = k.find(" / ")
_SCREAMING_SNAKE_CASE = k[index + len(" / ") :]
_SCREAMING_SNAKE_CASE = v
with open(os.path.join(args.output_dir, "job_links.json"), "w", encoding="UTF-8") as fp:
json.dump(job_links, fp, ensure_ascii=False, indent=4)
_SCREAMING_SNAKE_CASE = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, "artifacts.json"), "w", encoding="UTF-8") as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
for idx, (name, url) in enumerate(artifacts.items()):
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
_SCREAMING_SNAKE_CASE = get_all_errors(args.output_dir, job_links=job_links)
# `e[1]` is the error
_SCREAMING_SNAKE_CASE = Counter()
counter.update([e[1] for e in errors])
# print the top 30 most common test errors
_SCREAMING_SNAKE_CASE = counter.most_common(30)
for item in most_common:
print(item)
with open(os.path.join(args.output_dir, "errors.json"), "w", encoding="UTF-8") as fp:
json.dump(errors, fp, ensure_ascii=False, indent=4)
_SCREAMING_SNAKE_CASE = reduce_by_error(errors)
_SCREAMING_SNAKE_CASE = reduce_by_model(errors)
_SCREAMING_SNAKE_CASE = make_github_table(reduced_by_error)
_SCREAMING_SNAKE_CASE = make_github_table_per_model(reduced_by_model)
with open(os.path.join(args.output_dir, "reduced_by_error.txt"), "w", encoding="UTF-8") as fp:
fp.write(sa)
with open(os.path.join(args.output_dir, "reduced_by_model.txt"), "w", encoding="UTF-8") as fp:
fp.write(sa)
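# --- illustrative invocation sketch (script file name and run id are placeholders) ---
# python get_ci_error_statistics.py \
#     --workflow_run_id 123456789 \
#     --output_dir ci_reports \
#     --token "$GITHUB_TOKEN"   # needs actions:read scope
# Afterwards `ci_reports/` holds job_links.json, artifacts.json, errors.json and the
# two markdown tables written at the end of the script.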
| 18 | 0 |
'''simple docstring'''
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
VALID_CHARS = (
    string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
LOWERCASE_INTS = [ord(letter) for letter in string.ascii_lowercase]
VALID_INTS = {ord(char) for char in VALID_CHARS}
COMMON_WORDS = ['''the''', '''be''', '''to''', '''of''', '''and''', '''in''', '''that''', '''have''']
def try_key(ciphertext : list[int] , key : tuple[int, ...] ):
    decoded = ""
    for keychar, cipherchar in zip(cycle(key ) , ciphertext ):
        decodedchar = cipherchar ^ keychar
        if decodedchar not in VALID_INTS:
            return None
        decoded += chr(decodedchar )
    return decoded
def filter_valid_chars(ciphertext : list[int] ):
    possibles = []
    for key in product(LOWERCASE_INTS , repeat=3 ):
        encoded = try_key(ciphertext , key )
        if encoded is not None:
            possibles.append(encoded )
    return possibles
def filter_common_word(possibles : list[str] , common_word : str ):
    return [possible for possible in possibles if common_word in possible.lower()]
def solution(filename : str = "p059_cipher.txt" ):
    data = Path(__file__ ).parent.joinpath(filename ).read_text(encoding="""utf-8""" )
    ciphertext = [int(number ) for number in data.strip().split(""",""" )]
    possibles = filter_valid_chars(ciphertext )
    for common_word in COMMON_WORDS:
        possibles = filter_common_word(possibles , common_word )
        if len(possibles ) == 1:
            break
    decoded_text = possibles[0]
    return sum(ord(char ) for char in decoded_text )
if __name__ == "__main__":
print(F"{solution() = }")
| 694 |
'''simple docstring'''
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class DPMSolverSinglestepSchedulerTest( SchedulerCommonTest ):
    scheduler_classes = (DPMSolverSinglestepScheduler,)
    forward_default_kwargs = (("num_inference_steps", 25),)
    def get_scheduler_config( self , **kwargs ) -> Any:
        config = {
"num_train_timesteps": 1000,
"beta_start": 0.0001,
"beta_end": 0.02,
"beta_schedule": "linear",
"solver_order": 2,
"prediction_type": "epsilon",
"thresholding": False,
"sample_max_value": 1.0,
"algorithm_type": "dpmsolver++",
"solver_type": "midpoint",
"lambda_min_clipped": -float("inf" ),
"variance_type": None,
}
config.update(**_lowerCAmelCase )
return config
def _snake_case ( self , _lowerCAmelCase=0 , **_lowerCAmelCase ) -> List[Any]:
_lowerCAmelCase = dict(self.forward_default_kwargs )
_lowerCAmelCase = kwargs.pop("num_inference_steps" , _lowerCAmelCase )
_lowerCAmelCase = self.dummy_sample
_lowerCAmelCase = 0.1 * sample
_lowerCAmelCase = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
_lowerCAmelCase = self.get_scheduler_config(**_lowerCAmelCase )
_lowerCAmelCase = scheduler_class(**_lowerCAmelCase )
scheduler.set_timesteps(_lowerCAmelCase )
# copy over dummy past residuals
_lowerCAmelCase = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_lowerCAmelCase )
_lowerCAmelCase = scheduler_class.from_pretrained(_lowerCAmelCase )
new_scheduler.set_timesteps(_lowerCAmelCase )
# copy over dummy past residuals
_lowerCAmelCase = dummy_past_residuals[: new_scheduler.config.solver_order]
_lowerCAmelCase , _lowerCAmelCase = sample, sample
for t in range(_lowerCAmelCase , time_step + scheduler.config.solver_order + 1 ):
_lowerCAmelCase = scheduler.step(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , **_lowerCAmelCase ).prev_sample
_lowerCAmelCase = new_scheduler.step(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , **_lowerCAmelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def _snake_case ( self ) -> int:
pass
def _snake_case ( self , _lowerCAmelCase=0 , **_lowerCAmelCase ) -> Optional[int]:
_lowerCAmelCase = dict(self.forward_default_kwargs )
_lowerCAmelCase = kwargs.pop("num_inference_steps" , _lowerCAmelCase )
_lowerCAmelCase = self.dummy_sample
_lowerCAmelCase = 0.1 * sample
_lowerCAmelCase = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
_lowerCAmelCase = self.get_scheduler_config()
_lowerCAmelCase = scheduler_class(**_lowerCAmelCase )
scheduler.set_timesteps(_lowerCAmelCase )
# copy over dummy past residuals (must be after setting timesteps)
_lowerCAmelCase = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_lowerCAmelCase )
_lowerCAmelCase = scheduler_class.from_pretrained(_lowerCAmelCase )
# copy over dummy past residuals
new_scheduler.set_timesteps(_lowerCAmelCase )
# copy over dummy past residual (must be after setting timesteps)
_lowerCAmelCase = dummy_past_residuals[: new_scheduler.config.solver_order]
_lowerCAmelCase = scheduler.step(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , **_lowerCAmelCase ).prev_sample
_lowerCAmelCase = new_scheduler.step(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , **_lowerCAmelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
    def full_loop( self , scheduler=None , **config ) -> Tuple:
        if scheduler is None:
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config(**config )
            scheduler = scheduler_class(**scheduler_config )
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config )
        scheduler = scheduler_class(**scheduler_config )
        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps )
        for i, t in enumerate(scheduler.timesteps ):
            residual = model(sample , t )
            sample = scheduler.step(residual , t , sample ).prev_sample
        return sample
def _snake_case ( self ) -> Union[str, Any]:
_lowerCAmelCase = DPMSolverSinglestepScheduler(**self.get_scheduler_config() )
_lowerCAmelCase = 50
_lowerCAmelCase = self.dummy_model()
_lowerCAmelCase = self.dummy_sample_deter
scheduler.set_timesteps(_lowerCAmelCase )
# make sure that the first t is uneven
for i, t in enumerate(scheduler.timesteps[3:] ):
_lowerCAmelCase = model(_lowerCAmelCase , _lowerCAmelCase )
_lowerCAmelCase = scheduler.step(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ).prev_sample
_lowerCAmelCase = torch.mean(torch.abs(_lowerCAmelCase ) )
assert abs(result_mean.item() - 0.2574 ) < 1E-3
def _snake_case ( self ) -> Optional[Any]:
for timesteps in [25, 50, 100, 999, 1000]:
self.check_over_configs(num_train_timesteps=_lowerCAmelCase )
def _snake_case ( self ) -> List[Any]:
# make sure that iterating over schedulers with same config names gives same results
# for defaults
_lowerCAmelCase = DPMSolverSinglestepScheduler(**self.get_scheduler_config() )
_lowerCAmelCase = self.full_loop(scheduler=_lowerCAmelCase )
_lowerCAmelCase = torch.mean(torch.abs(_lowerCAmelCase ) )
assert abs(result_mean.item() - 0.2791 ) < 1E-3
_lowerCAmelCase = DEISMultistepScheduler.from_config(scheduler.config )
_lowerCAmelCase = DPMSolverMultistepScheduler.from_config(scheduler.config )
_lowerCAmelCase = UniPCMultistepScheduler.from_config(scheduler.config )
_lowerCAmelCase = DPMSolverSinglestepScheduler.from_config(scheduler.config )
_lowerCAmelCase = self.full_loop(scheduler=_lowerCAmelCase )
_lowerCAmelCase = torch.mean(torch.abs(_lowerCAmelCase ) )
assert abs(result_mean.item() - 0.2791 ) < 1E-3
def _snake_case ( self ) -> str:
        self.check_over_configs(thresholding=False )
        for order in [1, 2, 3]:
            for solver_type in ["midpoint", "heun"]:
                for threshold in [0.5, 1.0, 2.0]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            thresholding=True , prediction_type=prediction_type , sample_max_value=threshold , algorithm_type="dpmsolver++" , solver_order=order , solver_type=solver_type , )
def _snake_case ( self ) -> Dict:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=_lowerCAmelCase )
def _snake_case ( self ) -> Union[str, Any]:
for algorithm_type in ["dpmsolver", "dpmsolver++"]:
for solver_type in ["midpoint", "heun"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            solver_order=order , solver_type=solver_type , prediction_type=prediction_type , algorithm_type=algorithm_type , )
                        _lowerCAmelCase = self.full_loop(
                            solver_order=order , solver_type=solver_type , prediction_type=prediction_type , algorithm_type=algorithm_type , )
                        assert not torch.isnan(_lowerCAmelCase ).any(), "Samples have nan numbers"
def _snake_case ( self ) -> Optional[Any]:
        self.check_over_configs(lower_order_final=True )
        self.check_over_configs(lower_order_final=False )
def _snake_case ( self ) -> Optional[Any]:
self.check_over_configs(lambda_min_clipped=-float("inf" ) )
self.check_over_configs(lambda_min_clipped=-5.1 )
def _snake_case ( self ) -> str:
        self.check_over_configs(variance_type=None )
self.check_over_configs(variance_type="learned_range" )
def _snake_case ( self ) -> int:
for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
self.check_over_forward(num_inference_steps=_lowerCAmelCase , time_step=0 )
def _snake_case ( self ) -> Any:
_lowerCAmelCase = self.full_loop()
_lowerCAmelCase = torch.mean(torch.abs(_lowerCAmelCase ) )
assert abs(result_mean.item() - 0.2791 ) < 1E-3
def _snake_case ( self ) -> List[str]:
        _lowerCAmelCase = self.full_loop(use_karras_sigmas=True )
_lowerCAmelCase = torch.mean(torch.abs(_lowerCAmelCase ) )
assert abs(result_mean.item() - 0.2248 ) < 1E-3
def _snake_case ( self ) -> Union[str, Any]:
_lowerCAmelCase = self.full_loop(prediction_type="v_prediction" )
_lowerCAmelCase = torch.mean(torch.abs(_lowerCAmelCase ) )
assert abs(result_mean.item() - 0.1453 ) < 1E-3
def _snake_case ( self ) -> Any:
        _lowerCAmelCase = self.full_loop(prediction_type="v_prediction" , use_karras_sigmas=True )
_lowerCAmelCase = torch.mean(torch.abs(_lowerCAmelCase ) )
assert abs(result_mean.item() - 0.0649 ) < 1E-3
def _snake_case ( self ) -> List[Any]:
_lowerCAmelCase = self.scheduler_classes[0]
        _lowerCAmelCase = self.get_scheduler_config(thresholding=True , dynamic_thresholding_ratio=0 )
_lowerCAmelCase = scheduler_class(**_lowerCAmelCase )
_lowerCAmelCase = 10
_lowerCAmelCase = self.dummy_model()
_lowerCAmelCase = self.dummy_sample_deter.half()
scheduler.set_timesteps(_lowerCAmelCase )
for i, t in enumerate(scheduler.timesteps ):
_lowerCAmelCase = model(_lowerCAmelCase , _lowerCAmelCase )
_lowerCAmelCase = scheduler.step(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ).prev_sample
        assert sample.dtype == torch.float16
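# Minimal usage sketch (scheduler and torch as imported above): build the scheduler
# under test directly and advance one denoising step on a random sample.
def _example_single_step():
    scheduler = DPMSolverSinglestepScheduler()
    scheduler.set_timesteps(10 )
    sample = torch.randn(1 , 3 , 8 , 8 )
    model_output = torch.randn_like(sample )
    return scheduler.step(model_output , scheduler.timesteps[0] , sample ).prev_sample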
| 18 | 0 |
"""simple docstring"""
from argparse import ArgumentParser, Namespace
from ..utils import logging
from . import BaseTransformersCLICommand
def convert_command_factory(args) -> "ConvertCommand":
    return ConvertCommand(
        args.model_type , args.tf_checkpoint , args.pytorch_dump_output , args.config , args.finetuning_task_name )
IMPORT_ERROR_MESSAGE = "\ntransformers can only be used from the commandline to convert TensorFlow models in PyTorch, In that case, it requires\nTensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.\n"
class ConvertCommand(BaseTransformersCLICommand ):
    @staticmethod
    def register_subcommand( parser )-> None:
        '''simple docstring'''
        train_parser = parser.add_parser(
            "convert" , help="CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints." , )
        train_parser.add_argument("--model_type" , type=str , required=True , help="Model's type.")
        train_parser.add_argument(
            "--tf_checkpoint" , type=str , required=True , help="TensorFlow checkpoint path or folder.")
        train_parser.add_argument(
            "--pytorch_dump_output" , type=str , required=True , help="Path to the PyTorch saved model output.")
        train_parser.add_argument("--config" , type=str , default="" , help="Configuration file path or folder.")
        train_parser.add_argument(
            "--finetuning_task_name" , type=str , default=None , help="Optional fine-tuning task name if the TF model was a finetuned model." , )
        train_parser.set_defaults(func=convert_command_factory)
    def __init__( self , model_type , tf_checkpoint , pytorch_dump_output , config , finetuning_task_name , *args , ):
        '''simple docstring'''
        self._logger = logging.get_logger("transformers-cli/converting")
        self._logger.info(f"Loading model {model_type}")
        self._model_type = model_type
        self._tf_checkpoint = tf_checkpoint
        self._pytorch_dump_output = pytorch_dump_output
        self._config = config
        self._finetuning_task_name = finetuning_task_name
    def run( self )-> None:
        '''simple docstring'''
if self._model_type == "albert":
try:
from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output)
elif self._model_type == "bert":
try:
from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output)
elif self._model_type == "funnel":
try:
from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output)
elif self._model_type == "t5":
try:
from ..models.ta.convert_ta_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output)
elif self._model_type == "gpt":
from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import (
convert_openai_checkpoint_to_pytorch,
)
convert_openai_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output)
        elif self._model_type == "transfo_xl":
            try:
                from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import (
                    convert_transfo_xl_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
            if "ckpt" in self._tf_checkpoint.lower():
                TF_CHECKPOINT = self._tf_checkpoint
                TF_DATASET_FILE = ""
            else:
                TF_DATASET_FILE = self._tf_checkpoint
                TF_CHECKPOINT = ""
            convert_transfo_xl_checkpoint_to_pytorch(
                TF_CHECKPOINT , self._config , self._pytorch_dump_output , TF_DATASET_FILE)
elif self._model_type == "gpt2":
try:
from ..models.gpta.convert_gpta_original_tf_checkpoint_to_pytorch import (
convert_gpta_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_gpta_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output)
elif self._model_type == "xlnet":
try:
from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import (
convert_xlnet_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_xlnet_checkpoint_to_pytorch(
self._tf_checkpoint , self._config , self._pytorch_dump_output , self._finetuning_task_name)
elif self._model_type == "xlm":
from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import (
convert_xlm_checkpoint_to_pytorch,
)
convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output)
elif self._model_type == "lxmert":
from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import (
convert_lxmert_checkpoint_to_pytorch,
)
convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output)
elif self._model_type == "rembert":
from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import (
convert_rembert_tf_checkpoint_to_pytorch,
)
convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output)
        else:
            raise ValueError(
                "--model_type should be selected in the list [albert, bert, funnel, t5, gpt, transfo_xl, gpt2, xlnet, xlm, lxmert, rembert]")
| 346 |
'''simple docstring'''
from __future__ import annotations
def average(nums: list) -> float:
    """Return the arithmetic mean of the numbers in ``nums``."""
    if not nums:
        raise ValueError("List is empty" )
    return sum(nums) / len(nums)
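# Quick self-check (demonstrates the contract above; picked up by doctest below):
def _average_example() -> float:
    """
    >>> _average_example()
    2.0
    """
    return average([1, 2, 3])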
if __name__ == "__main__":
import doctest
doctest.testmod()
| 18 | 0 |
import torch
from diffusers import DDPMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class UpperCAmelCase_ ( SchedulerCommonTest ):
    '''simple docstring'''
    scheduler_classes = (DDPMParallelScheduler,)
    def get_scheduler_config( self , **kwargs ):
        '''simple docstring'''
        config = {
            'num_train_timesteps': 1_000,
            'beta_start': 0.0_0_0_1,
            'beta_end': 0.0_2,
            'beta_schedule': 'linear',
            'variance_type': 'fixed_small',
            'clip_sample': True,
        }
        config.update(**kwargs )
        return config
def _A ( self ):
'''simple docstring'''
for timesteps in [1, 5, 100, 1_000]:
self.check_over_configs(num_train_timesteps=_lowerCAmelCase )
def _A ( self ):
'''simple docstring'''
for beta_start, beta_end in zip([0.0_0_0_1, 0.0_0_1, 0.0_1, 0.1] , [0.0_0_2, 0.0_2, 0.2, 2] ):
self.check_over_configs(beta_start=_lowerCAmelCase , beta_end=_lowerCAmelCase )
def _A ( self ):
'''simple docstring'''
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=_lowerCAmelCase )
def _A ( self ):
'''simple docstring'''
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=_lowerCAmelCase )
def _A ( self ):
'''simple docstring'''
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=_lowerCAmelCase )
def _A ( self ):
'''simple docstring'''
self.check_over_configs(thresholding=_lowerCAmelCase )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=_lowerCAmelCase , prediction_type=_lowerCAmelCase , sample_max_value=_lowerCAmelCase , )
def _A ( self ):
'''simple docstring'''
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=_lowerCAmelCase )
def _A ( self ):
'''simple docstring'''
for t in [0, 500, 999]:
self.check_over_forward(time_step=_lowerCAmelCase )
def _A ( self ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.scheduler_classes[0]
__SCREAMING_SNAKE_CASE = self.get_scheduler_config()
__SCREAMING_SNAKE_CASE = scheduler_class(**_lowerCAmelCase )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_0_9_7_9 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.0_2 ) ) < 1e-5
def _A ( self ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.scheduler_classes[0]
__SCREAMING_SNAKE_CASE = self.get_scheduler_config()
__SCREAMING_SNAKE_CASE = scheduler_class(**_lowerCAmelCase )
__SCREAMING_SNAKE_CASE = len(_lowerCAmelCase )
__SCREAMING_SNAKE_CASE = self.dummy_model()
__SCREAMING_SNAKE_CASE = self.dummy_sample_deter
__SCREAMING_SNAKE_CASE = self.dummy_sample_deter + 0.1
__SCREAMING_SNAKE_CASE = self.dummy_sample_deter - 0.1
__SCREAMING_SNAKE_CASE = samplea.shape[0]
__SCREAMING_SNAKE_CASE = torch.stack([samplea, samplea, samplea] , dim=0 )
__SCREAMING_SNAKE_CASE = torch.arange(_lowerCAmelCase )[0:3, None].repeat(1 , _lowerCAmelCase )
__SCREAMING_SNAKE_CASE = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) )
__SCREAMING_SNAKE_CASE = scheduler.batch_step_no_noise(_lowerCAmelCase , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) )
__SCREAMING_SNAKE_CASE = torch.sum(torch.abs(_lowerCAmelCase ) )
__SCREAMING_SNAKE_CASE = torch.mean(torch.abs(_lowerCAmelCase ) )
assert abs(result_sum.item() - 1_1_5_3.1_8_3_3 ) < 1e-2
assert abs(result_mean.item() - 0.5_0_0_5 ) < 1e-3
def _A ( self ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.scheduler_classes[0]
__SCREAMING_SNAKE_CASE = self.get_scheduler_config()
__SCREAMING_SNAKE_CASE = scheduler_class(**_lowerCAmelCase )
__SCREAMING_SNAKE_CASE = len(_lowerCAmelCase )
__SCREAMING_SNAKE_CASE = self.dummy_model()
__SCREAMING_SNAKE_CASE = self.dummy_sample_deter
__SCREAMING_SNAKE_CASE = torch.manual_seed(0 )
for t in reversed(range(_lowerCAmelCase ) ):
# 1. predict noise residual
__SCREAMING_SNAKE_CASE = model(_lowerCAmelCase , _lowerCAmelCase )
# 2. predict previous mean of sample x_t-1
__SCREAMING_SNAKE_CASE = scheduler.step(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , generator=_lowerCAmelCase ).prev_sample
__SCREAMING_SNAKE_CASE = pred_prev_sample
__SCREAMING_SNAKE_CASE = torch.sum(torch.abs(_lowerCAmelCase ) )
__SCREAMING_SNAKE_CASE = torch.mean(torch.abs(_lowerCAmelCase ) )
assert abs(result_sum.item() - 2_5_8.9_6_0_6 ) < 1e-2
assert abs(result_mean.item() - 0.3_3_7_2 ) < 1e-3
def _A ( self ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.scheduler_classes[0]
__SCREAMING_SNAKE_CASE = self.get_scheduler_config(prediction_type='v_prediction' )
__SCREAMING_SNAKE_CASE = scheduler_class(**_lowerCAmelCase )
__SCREAMING_SNAKE_CASE = len(_lowerCAmelCase )
__SCREAMING_SNAKE_CASE = self.dummy_model()
__SCREAMING_SNAKE_CASE = self.dummy_sample_deter
__SCREAMING_SNAKE_CASE = torch.manual_seed(0 )
for t in reversed(range(_lowerCAmelCase ) ):
# 1. predict noise residual
__SCREAMING_SNAKE_CASE = model(_lowerCAmelCase , _lowerCAmelCase )
# 2. predict previous mean of sample x_t-1
__SCREAMING_SNAKE_CASE = scheduler.step(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , generator=_lowerCAmelCase ).prev_sample
__SCREAMING_SNAKE_CASE = pred_prev_sample
__SCREAMING_SNAKE_CASE = torch.sum(torch.abs(_lowerCAmelCase ) )
__SCREAMING_SNAKE_CASE = torch.mean(torch.abs(_lowerCAmelCase ) )
assert abs(result_sum.item() - 2_0_2.0_2_9_6 ) < 1e-2
assert abs(result_mean.item() - 0.2_6_3_1 ) < 1e-3
def _A ( self ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.scheduler_classes[0]
__SCREAMING_SNAKE_CASE = self.get_scheduler_config()
__SCREAMING_SNAKE_CASE = scheduler_class(**_lowerCAmelCase )
__SCREAMING_SNAKE_CASE = [100, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=_lowerCAmelCase )
__SCREAMING_SNAKE_CASE = scheduler.timesteps
for i, timestep in enumerate(_lowerCAmelCase ):
if i == len(_lowerCAmelCase ) - 1:
__SCREAMING_SNAKE_CASE = -1
else:
__SCREAMING_SNAKE_CASE = timesteps[i + 1]
__SCREAMING_SNAKE_CASE = scheduler.previous_timestep(_lowerCAmelCase )
__SCREAMING_SNAKE_CASE = prev_t.item()
self.assertEqual(_lowerCAmelCase , _lowerCAmelCase )
def _A ( self ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.scheduler_classes[0]
__SCREAMING_SNAKE_CASE = self.get_scheduler_config()
__SCREAMING_SNAKE_CASE = scheduler_class(**_lowerCAmelCase )
__SCREAMING_SNAKE_CASE = [100, 87, 50, 51, 0]
with self.assertRaises(_lowerCAmelCase , msg='`custom_timesteps` must be in descending order.' ):
scheduler.set_timesteps(timesteps=_lowerCAmelCase )
def _A ( self ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.scheduler_classes[0]
__SCREAMING_SNAKE_CASE = self.get_scheduler_config()
__SCREAMING_SNAKE_CASE = scheduler_class(**_lowerCAmelCase )
__SCREAMING_SNAKE_CASE = [100, 87, 50, 1, 0]
__SCREAMING_SNAKE_CASE = len(_lowerCAmelCase )
with self.assertRaises(_lowerCAmelCase , msg='Can only pass one of `num_inference_steps` or `custom_timesteps`.' ):
scheduler.set_timesteps(num_inference_steps=_lowerCAmelCase , timesteps=_lowerCAmelCase )
def _A ( self ):
'''simple docstring'''
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        timesteps = [scheduler.config.num_train_timesteps]
        with self.assertRaises(
            ValueError , msg=f'`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}' , ):
            scheduler.set_timesteps(timesteps=timesteps )
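# Usage sketch (DDPMParallelScheduler as imported above): custom timesteps must be a
# strictly descending list, mirroring the validation exercised by the tests.
def _example_custom_timesteps():
    scheduler = DDPMParallelScheduler()
    scheduler.set_timesteps(timesteps=[100, 87, 50, 1, 0] )
    return scheduler.timesteps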
| 148 |
'''simple docstring'''
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class lowerCAmelCase_ ( unittest.TestCase ):
    def _snake_case ( self ) -> Union[str, Any]:
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(torch_device )
        model.config.eos_token_id = -1
        input_ids = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(torch_device )
        greedy_ids = model.generate(input_ids , max_new_tokens=10 , do_sample=False )
        greedy_text = tokenizer.decode(greedy_ids[0] )
        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer )
            model.generate(input_ids , max_new_tokens=10 , do_sample=False , streamer=streamer )
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]
        self.assertEqual(streamer_text , greedy_text )
def _snake_case ( self ) -> Union[str, Any]:
_lowerCAmelCase = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
_lowerCAmelCase = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(_lowerCAmelCase )
_lowerCAmelCase = -1
_lowerCAmelCase = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_lowerCAmelCase )
_lowerCAmelCase = model.generate(_lowerCAmelCase , max_new_tokens=10 , do_sample=_lowerCAmelCase )
_lowerCAmelCase = tokenizer.decode(greedy_ids[0] )
_lowerCAmelCase = TextIteratorStreamer(_lowerCAmelCase )
_lowerCAmelCase = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
_lowerCAmelCase = Thread(target=model.generate , kwargs=_lowerCAmelCase )
thread.start()
_lowerCAmelCase = ""
for new_text in streamer:
streamer_text += new_text
self.assertEqual(_lowerCAmelCase , _lowerCAmelCase )
def _snake_case ( self ) -> List[str]:
_lowerCAmelCase = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
_lowerCAmelCase = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(_lowerCAmelCase )
_lowerCAmelCase = -1
_lowerCAmelCase = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_lowerCAmelCase )
_lowerCAmelCase = model.generate(_lowerCAmelCase , max_new_tokens=10 , do_sample=_lowerCAmelCase )
_lowerCAmelCase = greedy_ids[:, input_ids.shape[1] :]
_lowerCAmelCase = tokenizer.decode(new_greedy_ids[0] )
with CaptureStdout() as cs:
_lowerCAmelCase = TextStreamer(_lowerCAmelCase , skip_prompt=_lowerCAmelCase )
model.generate(_lowerCAmelCase , max_new_tokens=10 , do_sample=_lowerCAmelCase , streamer=_lowerCAmelCase )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
_lowerCAmelCase = cs.out[:-1]
self.assertEqual(_lowerCAmelCase , _lowerCAmelCase )
def _snake_case ( self ) -> Dict:
# Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested
# with actual models -- the dummy models' tokenizers are not aligned with their models, and
# `skip_special_tokens=True` has no effect on them
_lowerCAmelCase = AutoTokenizer.from_pretrained("distilgpt2" )
_lowerCAmelCase = AutoModelForCausalLM.from_pretrained("distilgpt2" ).to(_lowerCAmelCase )
_lowerCAmelCase = -1
_lowerCAmelCase = torch.ones((1, 5) , device=_lowerCAmelCase ).long() * model.config.bos_token_id
with CaptureStdout() as cs:
_lowerCAmelCase = TextStreamer(_lowerCAmelCase , skip_special_tokens=_lowerCAmelCase )
model.generate(_lowerCAmelCase , max_new_tokens=1 , do_sample=_lowerCAmelCase , streamer=_lowerCAmelCase )
# The prompt contains a special token, so the streamer should not print it. As such, the output text, when
# re-tokenized, must only contain one token
_lowerCAmelCase = cs.out[:-1] # Remove the final "\n"
_lowerCAmelCase = tokenizer(_lowerCAmelCase , return_tensors="pt" )
self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) )
def _snake_case ( self ) -> Union[str, Any]:
_lowerCAmelCase = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
_lowerCAmelCase = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(_lowerCAmelCase )
_lowerCAmelCase = -1
_lowerCAmelCase = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_lowerCAmelCase )
_lowerCAmelCase = TextIteratorStreamer(_lowerCAmelCase , timeout=0.001 )
_lowerCAmelCase = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
_lowerCAmelCase = Thread(target=model.generate , kwargs=_lowerCAmelCase )
thread.start()
# The streamer will timeout after 0.001 seconds, so an exception will be raised
with self.assertRaises(_lowerCAmelCase ):
_lowerCAmelCase = ""
for new_text in streamer:
streamer_text += new_text
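# Usage sketch mirroring the tests above: generation runs on a worker thread while
# the main thread drains decoded text chunks from the iterator streamer.
def _example_streaming():
    tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
    model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
    inputs = tokenizer("Hello" , return_tensors="pt" )
    streamer = TextIteratorStreamer(tokenizer )
    thread = Thread(target=model.generate , kwargs={**inputs, "max_new_tokens": 10, "streamer": streamer} )
    thread.start()
    return "".join(streamer )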
| 18 | 0 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_electra import ElectraTokenizer
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'google/electra-small-generator': (
'https://huggingface.co/google/electra-small-generator/resolve/main/vocab.txt'
),
'google/electra-base-generator': 'https://huggingface.co/google/electra-base-generator/resolve/main/vocab.txt',
'google/electra-large-generator': (
'https://huggingface.co/google/electra-large-generator/resolve/main/vocab.txt'
),
'google/electra-small-discriminator': (
'https://huggingface.co/google/electra-small-discriminator/resolve/main/vocab.txt'
),
'google/electra-base-discriminator': (
'https://huggingface.co/google/electra-base-discriminator/resolve/main/vocab.txt'
),
'google/electra-large-discriminator': (
'https://huggingface.co/google/electra-large-discriminator/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'google/electra-small-generator': (
'https://huggingface.co/google/electra-small-generator/resolve/main/tokenizer.json'
),
'google/electra-base-generator': (
'https://huggingface.co/google/electra-base-generator/resolve/main/tokenizer.json'
),
'google/electra-large-generator': (
'https://huggingface.co/google/electra-large-generator/resolve/main/tokenizer.json'
),
'google/electra-small-discriminator': (
'https://huggingface.co/google/electra-small-discriminator/resolve/main/tokenizer.json'
),
'google/electra-base-discriminator': (
'https://huggingface.co/google/electra-base-discriminator/resolve/main/tokenizer.json'
),
'google/electra-large-discriminator': (
'https://huggingface.co/google/electra-large-discriminator/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'google/electra-small-generator': 5_12,
'google/electra-base-generator': 5_12,
'google/electra-large-generator': 5_12,
'google/electra-small-discriminator': 5_12,
'google/electra-base-discriminator': 5_12,
'google/electra-large-discriminator': 5_12,
}
PRETRAINED_INIT_CONFIGURATION = {
'google/electra-small-generator': {'do_lower_case': True},
'google/electra-base-generator': {'do_lower_case': True},
'google/electra-large-generator': {'do_lower_case': True},
'google/electra-small-discriminator': {'do_lower_case': True},
'google/electra-base-discriminator': {'do_lower_case': True},
'google/electra-large-discriminator': {'do_lower_case': True},
}
class ElectraTokenizerFast(PreTrainedTokenizerFast ):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ElectraTokenizer
    def __init__( self ,vocab_file=None ,tokenizer_file=None ,do_lower_case=True ,unk_token="[UNK]" ,sep_token="[SEP]" ,pad_token="[PAD]" ,cls_token="[CLS]" ,mask_token="[MASK]" ,tokenize_chinese_chars=True ,strip_accents=None ,**kwargs ,):
        super().__init__(
            vocab_file ,tokenizer_file=tokenizer_file ,do_lower_case=do_lower_case ,unk_token=unk_token ,sep_token=sep_token ,pad_token=pad_token ,cls_token=cls_token ,mask_token=mask_token ,tokenize_chinese_chars=tokenize_chinese_chars ,strip_accents=strip_accents ,**kwargs ,)
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get("""lowercase""" ,do_lower_case ) != do_lower_case
            or normalizer_state.get("""strip_accents""" ,strip_accents ) != strip_accents
            or normalizer_state.get("""handle_chinese_chars""" ,tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers ,normalizer_state.pop("""type""" ) )
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens( self ,token_ids_0 ,token_ids_1=None ):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences( self ,token_ids_0 ,token_ids_1 = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
    def save_vocabulary( self ,save_directory ,filename_prefix = None ):
        files = self._tokenizer.model.save(save_directory ,name=filename_prefix )
        return tuple(files )
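# Worked illustration of build_inputs_with_special_tokens above (hypothetical ids,
# cls=101, sep=102): pair A=[7, 8], B=[9] -> [101, 7, 8, 102, 9, 102], with
# token_type_ids [0, 0, 0, 0, 1, 1] from create_token_type_ids_from_sequences.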
| 50 |
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {
"facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json",
# See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class BlenderbotSmallConfig(PretrainedConfig ):
    model_type = "blenderbot-small"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
    def __init__( self , vocab_size=50265 , max_position_embeddings=512 , encoder_layers=8 , encoder_ffn_dim=2048 , encoder_attention_heads=16 , decoder_layers=8 , decoder_ffn_dim=2048 , decoder_attention_heads=16 , encoder_layerdrop=0.0 , decoder_layerdrop=0.0 , use_cache=True , is_encoder_decoder=True , activation_function="gelu" , d_model=512 , dropout=0.1 , attention_dropout=0.0 , activation_dropout=0.0 , init_std=0.02 , decoder_start_token_id=1 , scale_embedding=False , pad_token_id=0 , bos_token_id=1 , eos_token_id=2 , forced_eos_token_id=2 , **kwargs , ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , is_encoder_decoder=is_encoder_decoder , decoder_start_token_id=decoder_start_token_id , forced_eos_token_id=forced_eos_token_id , **kwargs , )
class BlenderbotSmallOnnxConfig(OnnxSeqaSeqConfigWithPast ):
@property
def _snake_case ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task in ["default", "seq2seq-lm"]:
_lowerCAmelCase = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
] )
if self.use_past:
_lowerCAmelCase = {0: "batch"}
_lowerCAmelCase = {0: "batch", 1: "past_decoder_sequence + sequence"}
else:
_lowerCAmelCase = {0: "batch", 1: "decoder_sequence"}
_lowerCAmelCase = {0: "batch", 1: "decoder_sequence"}
if self.use_past:
self.fill_with_past_key_values_(_lowerCAmelCase , direction="inputs" )
elif self.task == "causal-lm":
# TODO: figure this case out.
_lowerCAmelCase = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
] )
if self.use_past:
_lowerCAmelCase , _lowerCAmelCase = self.num_layers
for i in range(_lowerCAmelCase ):
_lowerCAmelCase = {0: "batch", 2: "past_sequence + sequence"}
_lowerCAmelCase = {0: "batch", 2: "past_sequence + sequence"}
else:
_lowerCAmelCase = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
] )
return common_inputs
@property
def _snake_case ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task in ["default", "seq2seq-lm"]:
_lowerCAmelCase = super().outputs
else:
_lowerCAmelCase = super(_lowerCAmelCase , self ).outputs
if self.use_past:
_lowerCAmelCase , _lowerCAmelCase = self.num_layers
for i in range(_lowerCAmelCase ):
_lowerCAmelCase = {0: "batch", 2: "past_sequence + sequence"}
_lowerCAmelCase = {0: "batch", 2: "past_sequence + sequence"}
return common_outputs
def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase = -1 , _lowerCAmelCase = -1 , _lowerCAmelCase = False , _lowerCAmelCase = None , ) -> Mapping[str, Any]:
_lowerCAmelCase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
# Generate decoder inputs
_lowerCAmelCase = seq_length if not self.use_past else 1
_lowerCAmelCase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
_lowerCAmelCase = {f'''decoder_{name}''': tensor for name, tensor in decoder_inputs.items()}
_lowerCAmelCase = dict(**_lowerCAmelCase , **_lowerCAmelCase )
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
_lowerCAmelCase , _lowerCAmelCase = common_inputs["input_ids"].shape
_lowerCAmelCase = common_inputs["decoder_input_ids"].shape[1]
_lowerCAmelCase , _lowerCAmelCase = self.num_attention_heads
_lowerCAmelCase = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
_lowerCAmelCase = decoder_seq_length + 3
_lowerCAmelCase = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
_lowerCAmelCase = torch.cat(
[common_inputs["decoder_attention_mask"], torch.ones(_lowerCAmelCase , _lowerCAmelCase )] , dim=1 )
_lowerCAmelCase = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
_lowerCAmelCase , _lowerCAmelCase = self.num_layers
_lowerCAmelCase = min(_lowerCAmelCase , _lowerCAmelCase )
_lowerCAmelCase = max(_lowerCAmelCase , _lowerCAmelCase ) - min_num_layers
_lowerCAmelCase = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"
for _ in range(_lowerCAmelCase ):
common_inputs["past_key_values"].append(
(
torch.zeros(_lowerCAmelCase ),
torch.zeros(_lowerCAmelCase ),
torch.zeros(_lowerCAmelCase ),
torch.zeros(_lowerCAmelCase ),
) )
# TODO: test this.
_lowerCAmelCase = encoder_shape if remaining_side_name == "encoder" else decoder_shape
for _ in range(_lowerCAmelCase , _lowerCAmelCase ):
common_inputs["past_key_values"].append((torch.zeros(_lowerCAmelCase ), torch.zeros(_lowerCAmelCase )) )
return common_inputs
def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase = -1 , _lowerCAmelCase = -1 , _lowerCAmelCase = False , _lowerCAmelCase = None , ) -> Mapping[str, Any]:
_lowerCAmelCase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
_lowerCAmelCase , _lowerCAmelCase = common_inputs["input_ids"].shape
# Not using the same length for past_key_values
_lowerCAmelCase = seqlen + 2
_lowerCAmelCase , _lowerCAmelCase = self.num_layers
_lowerCAmelCase , _lowerCAmelCase = self.num_attention_heads
_lowerCAmelCase = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
_lowerCAmelCase = common_inputs["attention_mask"].dtype
_lowerCAmelCase = torch.cat(
[common_inputs["attention_mask"], torch.ones(_lowerCAmelCase , _lowerCAmelCase , dtype=_lowerCAmelCase )] , dim=1 )
_lowerCAmelCase = [
(torch.zeros(_lowerCAmelCase ), torch.zeros(_lowerCAmelCase )) for _ in range(_lowerCAmelCase )
]
return common_inputs
def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase = -1 , _lowerCAmelCase = -1 , _lowerCAmelCase = False , _lowerCAmelCase = None , ) -> Mapping[str, Any]:
# Copied from OnnxConfig.generate_dummy_inputs
# Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
_lowerCAmelCase = compute_effective_axis_dimension(
_lowerCAmelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
_lowerCAmelCase = tokenizer.num_special_tokens_to_add(_lowerCAmelCase )
_lowerCAmelCase = compute_effective_axis_dimension(
_lowerCAmelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=_lowerCAmelCase )
# Generate dummy inputs according to compute batch and sequence
_lowerCAmelCase = [" ".join([tokenizer.unk_token] ) * seq_length] * batch_size
_lowerCAmelCase = dict(tokenizer(_lowerCAmelCase , return_tensors=_lowerCAmelCase ) )
return common_inputs
def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase = -1 , _lowerCAmelCase = -1 , _lowerCAmelCase = False , _lowerCAmelCase = None , ) -> Mapping[str, Any]:
if self.task in ["default", "seq2seq-lm"]:
_lowerCAmelCase = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
_lowerCAmelCase , batch_size=_lowerCAmelCase , seq_length=_lowerCAmelCase , is_pair=_lowerCAmelCase , framework=_lowerCAmelCase )
elif self.task == "causal-lm":
_lowerCAmelCase = self._generate_dummy_inputs_for_causal_lm(
_lowerCAmelCase , batch_size=_lowerCAmelCase , seq_length=_lowerCAmelCase , is_pair=_lowerCAmelCase , framework=_lowerCAmelCase )
else:
_lowerCAmelCase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
_lowerCAmelCase , batch_size=_lowerCAmelCase , seq_length=_lowerCAmelCase , is_pair=_lowerCAmelCase , framework=_lowerCAmelCase )
return common_inputs
def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Dict:
if self.task in ["default", "seq2seq-lm"]:
_lowerCAmelCase = super()._flatten_past_key_values_(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
else:
_lowerCAmelCase = super(_lowerCAmelCase , self )._flatten_past_key_values_(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
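# Standalone check (helper imported above; assumed semantics): with a dynamic axis
# (-1), dummy-input generation falls back to the fixed ONNX default dimension,
# minus any special tokens the tokenizer will add.
def _example_effective_axis():
    return compute_effective_axis_dimension(
        -1 , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )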
| 18 | 0 |
"""simple docstring"""
from collections import namedtuple
from_to = namedtuple('from_to', 'from_ to')
METRIC_CONVERSION = {
'cubicmeter': from_to(1, 1),
'litre': from_to(0.001, 1_0_0_0),
'kilolitre': from_to(1, 1),
'gallon': from_to(0.00454, 264.172),
'cubicyard': from_to(0.76455, 1.30795),
'cubicfoot': from_to(0.028, 35.3147),
'cup': from_to(0.000236588, 4226.75),
}
def volume_conversion(value: float, from_type: str, to_type: str) -> float:
    """Convert ``value`` between the volume units listed in METRIC_CONVERSION."""
    if from_type not in METRIC_CONVERSION:
        raise ValueError(
            f'''Invalid \'from_type\' value: {from_type!r} Supported values are:\n'''
            + ''', '''.join(METRIC_CONVERSION ) )
    if to_type not in METRIC_CONVERSION:
        raise ValueError(
            f'''Invalid \'to_type\' value: {to_type!r}. Supported values are:\n'''
            + ''', '''.join(METRIC_CONVERSION ) )
    return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to
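# Usage example (factors above): volume_conversion(4, "cubicmeter", "litre") == 4000.0,
# since from_("cubicmeter") == 1 and to("litre") == 1000.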
if __name__ == "__main__":
import doctest
doctest.testmod()
| 353 |
'''simple docstring'''
import re
import string
import numpy as np
import datasets
_DESCRIPTION = "\nReturns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.\n"
_KWARGS_DESCRIPTION = "\nArgs:\n    predictions: List of predicted texts.\n    references: List of reference texts.\n    regexes_to_ignore: List, defaults to None. Regex expressions of characters to\n        ignore when calculating the exact matches. Note: these regexes are removed\n        from the input data before the changes based on the options below (e.g. ignore_case,\n        ignore_punctuation, ignore_numbers) are applied.\n    ignore_case: Boolean, defaults to False. If true, turns everything\n        to lowercase so that capitalization differences are ignored.\n    ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before\n        comparing predictions and references.\n    ignore_numbers: Boolean, defaults to False. If true, removes all digits before\n        comparing predictions and references.\nReturns:\n    exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.\nExamples:\n    >>> exact_match = datasets.load_metric(\"exact_match\")\n    >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n    >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n    >>> results = exact_match.compute(references=refs, predictions=preds)\n    >>> print(round(results[\"exact_match\"], 1))\n    25.0\n\n    >>> exact_match = datasets.load_metric(\"exact_match\")\n    >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n    >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\"], ignore_case=True, ignore_punctuation=True)\n    >>> print(round(results[\"exact_match\"], 1))\n    50.0\n\n\n    >>> exact_match = datasets.load_metric(\"exact_match\")\n    >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n    >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True)\n    >>> print(round(results[\"exact_match\"], 1))\n    75.0\n\n    >>> exact_match = datasets.load_metric(\"exact_match\")\n    >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n    >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)\n    >>> print(round(results[\"exact_match\"], 1))\n    100.0\n\n    >>> exact_match = datasets.load_metric(\"exact_match\")\n    >>> refs = [\"The cat sat on the mat.\", \"Theaters are great.\", \"It's like comparing oranges and apples.\"]\n    >>> preds = [\"The cat sat on the mat?\", \"Theaters are great.\", \"It's like comparing apples and oranges.\"]\n    >>> results = exact_match.compute(references=refs, predictions=preds)\n    >>> print(round(results[\"exact_match\"], 1))\n    33.3\n\n"
_CITATION = "\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class lowerCAmelCase_ ( datasets.Metric ):
    def _info( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Value("string" , id="sequence" ),
} ) , reference_urls=[] , )
    def _compute( self , predictions , references , regexes_to_ignore=None , ignore_case=False , ignore_punctuation=False , ignore_numbers=False , ):
        if regexes_to_ignore is not None:
            for s in regexes_to_ignore:
                predictions = np.array([re.sub(s , "" , x ) for x in predictions] )
                references = np.array([re.sub(s , "" , x ) for x in references] )
        else:
            predictions = np.asarray(predictions )
            references = np.asarray(references )
        if ignore_case:
            predictions = np.char.lower(predictions )
            references = np.char.lower(references )
        if ignore_punctuation:
            repl_table = string.punctuation.maketrans("" , "" , string.punctuation )
            predictions = np.char.translate(predictions , table=repl_table )
            references = np.char.translate(references , table=repl_table )
        if ignore_numbers:
            repl_table = string.digits.maketrans("" , "" , string.digits )
            predictions = np.char.translate(predictions , table=repl_table )
            references = np.char.translate(references , table=repl_table )
        score_list = predictions == references
        return {"exact_match": np.mean(score_list ) * 100}
| 18 | 0 |
"""simple docstring"""
import argparse
import math
import os
from copy import deepcopy
import torch
from audio_diffusion.models import DiffusionAttnUnet1D
from diffusion import sampling
from torch import nn
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNet1DModel
SCREAMING_SNAKE_CASE__:Tuple = {
"""gwf-440k""": {
"""url""": """https://model-server.zqevans2.workers.dev/gwf-440k.ckpt""",
"""sample_rate""": 48000,
"""sample_size""": 65536,
},
"""jmann-small-190k""": {
"""url""": """https://model-server.zqevans2.workers.dev/jmann-small-190k.ckpt""",
"""sample_rate""": 48000,
"""sample_size""": 65536,
},
"""jmann-large-580k""": {
"""url""": """https://model-server.zqevans2.workers.dev/jmann-large-580k.ckpt""",
"""sample_rate""": 48000,
"""sample_size""": 131072,
},
"""maestro-uncond-150k""": {
"""url""": """https://model-server.zqevans2.workers.dev/maestro-uncond-150k.ckpt""",
"""sample_rate""": 16000,
"""sample_size""": 65536,
},
"""unlocked-uncond-250k""": {
"""url""": """https://model-server.zqevans2.workers.dev/unlocked-uncond-250k.ckpt""",
"""sample_rate""": 16000,
"""sample_size""": 65536,
},
"""honk-140k""": {
"""url""": """https://model-server.zqevans2.workers.dev/honk-140k.ckpt""",
"""sample_rate""": 16000,
"""sample_size""": 65536,
},
}
def alpha_sigma_to_t( alpha , sigma ):
    return torch.atan2(sigma , alpha ) / math.pi * 2
def get_crash_schedule( t ):
    sigma = torch.sin(t * math.pi / 2 ) ** 2
    alpha = (1 - sigma**2) ** 0.5
    return alpha_sigma_to_t(alpha , sigma )
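# Quick check (torch as imported above): alpha and sigma are both non-negative on
# [0, 1], so atan2 stays in [0, pi/2] and the returned schedule stays in [0, 1].
def _example_crash_schedule():
    t = torch.linspace(1 , 0 , 5 )
    return get_crash_schedule(t )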
class Object(object ):
    pass
class DiffusionUncond(nn.Module ):
    def __init__( self , global_args ):
        super().__init__()
        self.diffusion = DiffusionAttnUnet1D(global_args , n_attn_layers=4 )
        self.diffusion_ema = deepcopy(self.diffusion )
        self.rng = torch.quasirandom.SobolEngine(1 , scramble=True )
def download( model_name ):
    url = MODELS_MAP[model_name]["url"]
    os.system(F"wget {url} ./" )
    return F"./{model_name}.ckpt"
SCREAMING_SNAKE_CASE__:Optional[Any] = {
"""1""": """resnets.0""",
"""2""": """attentions.0""",
"""3""": """resnets.1""",
"""4""": """attentions.1""",
"""5""": """resnets.2""",
"""6""": """attentions.2""",
}
SCREAMING_SNAKE_CASE__:Tuple = {
"""8""": """resnets.0""",
"""9""": """attentions.0""",
"""10""": """resnets.1""",
"""11""": """attentions.1""",
"""12""": """resnets.2""",
"""13""": """attentions.2""",
}
SCREAMING_SNAKE_CASE__:List[Any] = {
"""1""": """resnets.0""",
"""2""": """attentions.0""",
"""3""": """resnets.1""",
"""4""": """attentions.1""",
"""5""": """resnets.2""",
"""6""": """attentions.2""",
"""8""": """resnets.3""",
"""9""": """attentions.3""",
"""10""": """resnets.4""",
"""11""": """attentions.4""",
"""12""": """resnets.5""",
"""13""": """attentions.5""",
}
SCREAMING_SNAKE_CASE__:Dict = {
"""0""": """resnets.0""",
"""1""": """resnets.1""",
"""2""": """resnets.2""",
"""4""": """resnets.0""",
"""5""": """resnets.1""",
"""6""": """resnets.2""",
}
SCREAMING_SNAKE_CASE__:List[Any] = {
"""skip""": """conv_skip""",
"""main.0""": """conv_1""",
"""main.1""": """group_norm_1""",
"""main.3""": """conv_2""",
"""main.4""": """group_norm_2""",
}
SCREAMING_SNAKE_CASE__:List[str] = {
"""norm""": """group_norm""",
"""qkv_proj""": ["""query""", """key""", """value"""],
"""out_proj""": ["""proj_attn"""],
}
def convert_resconv_naming( name ):
if name.startswith("skip" ):
return name.replace("skip" , RES_CONV_MAP["skip"] )
# name has to be of format main.{digit}
if not name.startswith("main." ):
raise ValueError(F"ResConvBlock error with {name}" )
return name.replace(name[:6] , RES_CONV_MAP[name[:6]] )
def convert_attn_naming( name ):
    for key, value in ATTN_MAP.items():
        if name.startswith(key ) and not isinstance(value , list ):
            return name.replace(key , value )
        elif name.startswith(key ):
            return [name.replace(key , v ) for v in value]
    raise ValueError(F"Attn error with {name}" )
def rename( input_string , max_depth=13 ):
    string = input_string
    if string.split("." )[0] == "timestep_embed":
        return string.replace("timestep_embed" , "time_proj" )
    depth = 0
    if string.startswith("net.3." ):
        depth += 1
        string = string[6:]
    elif string.startswith("net." ):
        string = string[4:]
    while string.startswith("main.7." ):
        depth += 1
        string = string[7:]
    if string.startswith("main." ):
        string = string[5:]
    # mid block
    if string[:2].isdigit():
        layer_num = string[:2]
        string_left = string[2:]
    else:
        layer_num = string[0]
        string_left = string[1:]
    if depth == max_depth:
        new_layer = MID_NUM_TO_LAYER[layer_num]
        prefix = "mid_block"
    elif depth > 0 and int(layer_num ) < 7:
        new_layer = DOWN_NUM_TO_LAYER[layer_num]
        prefix = F"down_blocks.{depth}"
    elif depth > 0 and int(layer_num ) > 7:
        new_layer = UP_NUM_TO_LAYER[layer_num]
        prefix = F"up_blocks.{max_depth - depth - 1}"
    elif depth == 0:
        new_layer = DEPTH_0_TO_LAYER[layer_num]
        prefix = F"up_blocks.{max_depth - 1}" if int(layer_num ) > 3 else "down_blocks.0"
    if not string_left.startswith("." ):
        raise ValueError(F"Naming error with {input_string} and string_left: {string_left}." )
    string_left = string_left[1:]
    if "resnets" in new_layer:
        string_left = convert_resconv_naming(string_left )
    elif "attentions" in new_layer:
        new_string_left = convert_attn_naming(string_left )
        string_left = new_string_left
    if not isinstance(string_left , list ):
        new_string = prefix + "." + new_layer + "." + string_left
    else:
        new_string = [prefix + "." + new_layer + "." + s for s in string_left]
    return new_string
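# Worked example (follows the first branch above):
#   rename("timestep_embed.weight") returns "time_proj.weight"; all other UNet keys
#   are routed through the NUM_TO_LAYER tables defined earlier in this file.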
def rename_orig_weights( state_dict ):
    new_state_dict = {}
    for k, v in state_dict.items():
        if k.endswith("kernel" ):
            # up- and downsample layers, don't have trainable weights
            continue
        new_k = rename(k )
        # check if we need to transform from Conv => Linear for attention
        if isinstance(new_k , list ):
            new_state_dict = transform_conv_attns(new_state_dict , new_k , v )
        else:
            new_state_dict[new_k] = v
    return new_state_dict
def transform_conv_attns( new_state_dict , new_k , v ):
    if len(new_k ) == 1:
        if len(v.shape ) == 3:
            # weight: drop the trailing conv dimension to get a linear weight
            new_state_dict[new_k[0]] = v[:, :, 0]
        else:
            # bias
            new_state_dict[new_k[0]] = v
    else:
        # qkv matrices: split the fused projection into three equal chunks
        trippled_shape = v.shape[0]
        single_shape = trippled_shape // 3
        for i in range(3 ):
            if len(v.shape ) == 3:
                new_state_dict[new_k[i]] = v[i * single_shape : (i + 1) * single_shape, :, 0]
            else:
                new_state_dict[new_k[i]] = v[i * single_shape : (i + 1) * single_shape]
    return new_state_dict
def main( args ):
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu" )
    model_name = args.model_path.split("/" )[-1].split("." )[0]
    if not os.path.isfile(args.model_path ):
        assert (
            model_name == args.model_path
        ), F"Make sure to provide one of the official model names {MODELS_MAP.keys()}"
        args.model_path = download(model_name )
    sample_rate = MODELS_MAP[model_name]["sample_rate"]
    sample_size = MODELS_MAP[model_name]["sample_size"]
    config = Object()
    config.sample_size = sample_size
    config.sample_rate = sample_rate
    config.latent_dim = 0
    diffusers_model = UNet1DModel(sample_size=sample_size , sample_rate=sample_rate )
    diffusers_state_dict = diffusers_model.state_dict()
    orig_model = DiffusionUncond(config )
    orig_model.load_state_dict(torch.load(args.model_path , map_location=device )["state_dict"] )
    orig_model = orig_model.diffusion_ema.eval()
    orig_model_state_dict = orig_model.state_dict()
    renamed_state_dict = rename_orig_weights(orig_model_state_dict )
    renamed_minus_diffusers = set(renamed_state_dict.keys() ) - set(diffusers_state_dict.keys() )
    diffusers_minus_renamed = set(diffusers_state_dict.keys() ) - set(renamed_state_dict.keys() )
    assert len(renamed_minus_diffusers ) == 0, F"Problem with {renamed_minus_diffusers}"
    assert all(k.endswith("kernel" ) for k in list(diffusers_minus_renamed ) ), F"Problem with {diffusers_minus_renamed}"
    for key, value in renamed_state_dict.items():
        assert (
            diffusers_state_dict[key].squeeze().shape == value.squeeze().shape
        ), F"Shape for {key} doesn't match. Diffusers: {diffusers_state_dict[key].shape} vs. {value.shape}"
        if key == "time_proj.weight":
            value = value.squeeze()
        diffusers_state_dict[key] = value
    diffusers_model.load_state_dict(diffusers_state_dict )
    steps = 100
    seed = 33
    diffusers_scheduler = IPNDMScheduler(num_train_timesteps=steps )
    generator = torch.manual_seed(seed )
    noise = torch.randn([1, 2, config.sample_size] , generator=generator ).to(device )
    t = torch.linspace(1 , 0 , steps + 1 , device=device )[:-1]
    step_list = get_crash_schedule(t )
    pipe = DanceDiffusionPipeline(unet=diffusers_model , scheduler=diffusers_scheduler )
    generator = torch.manual_seed(33 )
    audio = pipe(num_inference_steps=steps , generator=generator ).audios
    generated = sampling.iplms_sample(orig_model , noise , step_list , {} )
    generated = generated.clamp(-1 , 1 )
    diff_sum = (generated - audio ).abs().sum()
    diff_max = (generated - audio ).abs().max()
    if args.save:
        pipe.save_pretrained(args.checkpoint_path )
    print("Diff sum" , diff_sum )
    print("Diff max" , diff_max )
    assert diff_max < 1E-3, F"Diff max: {diff_max} is too much :-/"
    print(F"Conversion for {model_name} successful!" )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__:Union[str, Any] = argparse.ArgumentParser()
parser.add_argument("""--model_path""", default=None, type=str, required=True, help="""Path to the model to convert.""")
parser.add_argument(
"""--save""", default=True, type=bool, required=False, help="""Whether to save the converted model or not."""
)
parser.add_argument("""--checkpoint_path""", default=None, type=str, required=True, help="""Path to the output model.""")
SCREAMING_SNAKE_CASE__:Any = parser.parse_args()
main(args)
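# Example invocation (script and checkpoint names are illustrative, not from
# the original source):
#   python convert_dance_diffusion_to_diffusers.py \
#       --model_path maestro-150k.ckpt --checkpoint_path ./dance-diffusion-out
# Caveat: `--save` is declared with `type=bool`, so argparse parses any
# non-empty string (including "False") as True; omit the flag to keep the default.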
| 528 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_yolos import YolosImageProcessor
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
class lowerCAmelCase_ ( __magic_name__ ):
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> None:
warnings.warn(
"The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
" use YolosImageProcessor instead." , _lowerCAmelCase , )
super().__init__(*_lowerCAmelCase , **_lowerCAmelCase )
| 18 | 0 |
from ..utils import DummyObject, requires_backends
class UpperCamelCase__ ( metaclass=__magic_name__ ):
__SCREAMING_SNAKE_CASE : Optional[Any] = ["torch", "scipy"]
def __init__( self : str , *UpperCamelCase__ : List[str] , **UpperCamelCase__ : List[Any] ):
'''simple docstring'''
requires_backends(self , ["""torch""", """scipy"""] )
@classmethod
def UpperCAmelCase__ ( cls : Dict , *UpperCamelCase__ : Optional[int] , **UpperCamelCase__ : int ):
'''simple docstring'''
requires_backends(cls , ["""torch""", """scipy"""] )
@classmethod
def UpperCAmelCase__ ( cls : str , *UpperCamelCase__ : Optional[int] , **UpperCamelCase__ : Optional[int] ):
'''simple docstring'''
requires_backends(cls , ["""torch""", """scipy"""] )
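# Usage note: in upstream `transformers` this stub is exported in place of the
# real class when torch or scipy is missing, so any use raises an ImportError
# with an installation hint via `requires_backends`.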
| 412 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {
"tiiuae/falcon-40b": "https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json",
"tiiuae/falcon-7b": "https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json",
}
class lowerCAmelCase_ ( __magic_name__ ):
__lowerCamelCase : Any = "falcon"
__lowerCamelCase : List[str] = ["past_key_values"]
def __init__( self , _lowerCAmelCase=65024 , _lowerCAmelCase=4544 , _lowerCAmelCase=32 , _lowerCAmelCase=71 , _lowerCAmelCase=1E-5 , _lowerCAmelCase=0.02 , _lowerCAmelCase=True , _lowerCAmelCase=0.0 , _lowerCAmelCase=0.0 , _lowerCAmelCase=None , _lowerCAmelCase=False , _lowerCAmelCase=False , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=False , _lowerCAmelCase=11 , _lowerCAmelCase=11 , **_lowerCAmelCase , ) -> Union[str, Any]:
_lowerCAmelCase = vocab_size
# Backward compatibility with n_embed kwarg
_lowerCAmelCase = kwargs.pop("n_embed" , _lowerCAmelCase )
_lowerCAmelCase = hidden_size if n_embed is None else n_embed
_lowerCAmelCase = num_hidden_layers
_lowerCAmelCase = num_attention_heads
_lowerCAmelCase = layer_norm_epsilon
_lowerCAmelCase = initializer_range
_lowerCAmelCase = use_cache
_lowerCAmelCase = hidden_dropout
_lowerCAmelCase = attention_dropout
_lowerCAmelCase = bos_token_id
_lowerCAmelCase = eos_token_id
_lowerCAmelCase = num_attention_heads if num_kv_heads is None else num_kv_heads
_lowerCAmelCase = alibi
_lowerCAmelCase = new_decoder_architecture
_lowerCAmelCase = multi_query # Ignored when new_decoder_architecture is True
_lowerCAmelCase = parallel_attn
_lowerCAmelCase = bias
super().__init__(bos_token_id=_lowerCAmelCase , eos_token_id=_lowerCAmelCase , **_lowerCAmelCase )
@property
def _snake_case ( self ) -> Optional[Any]:
return self.hidden_size // self.num_attention_heads
@property
def _snake_case ( self ) -> Optional[Any]:
return not self.alibi
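def _demo_falcon_config():
    # Minimal sketch, assuming the class above is FalconConfig from upstream
    # `transformers`; head_dim and rotary are the two derived properties
    # defined at the end of the class.
    from transformers import FalconConfig

    cfg = FalconConfig()
    return cfg.head_dim , cfg.rotary  # 4544 // 71 == 64, and True (alibi is off)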
| 18 | 0 |
"""simple docstring"""
from __future__ import annotations
def _a ( nums ):
    """simple docstring"""
    if not nums:
        raise ValueError("""List is empty""" )
    return sum(nums ) / len(nums )
if __name__ == "__main__":
import doctest
doctest.testmod()
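def _demo_mean():
    # Straightforward use of the helper above; an empty list raises ValueError.
    assert _a([1, 2, 3] ) == 2.0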
| 341 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {
"facebook/deit-base-distilled-patch16-224": (
"https://huggingface.co/facebook/deit-base-patch16-224/resolve/main/config.json"
),
# See all DeiT models at https://huggingface.co/models?filter=deit
}
class lowerCAmelCase_ ( __magic_name__ ):
__lowerCamelCase : Optional[int] = "deit"
def __init__( self , _lowerCAmelCase=768 , _lowerCAmelCase=12 , _lowerCAmelCase=12 , _lowerCAmelCase=3072 , _lowerCAmelCase="gelu" , _lowerCAmelCase=0.0 , _lowerCAmelCase=0.0 , _lowerCAmelCase=0.02 , _lowerCAmelCase=1E-12 , _lowerCAmelCase=224 , _lowerCAmelCase=16 , _lowerCAmelCase=3 , _lowerCAmelCase=True , _lowerCAmelCase=16 , **_lowerCAmelCase , ) -> Dict:
super().__init__(**_lowerCAmelCase )
_lowerCAmelCase = hidden_size
_lowerCAmelCase = num_hidden_layers
_lowerCAmelCase = num_attention_heads
_lowerCAmelCase = intermediate_size
_lowerCAmelCase = hidden_act
_lowerCAmelCase = hidden_dropout_prob
_lowerCAmelCase = attention_probs_dropout_prob
_lowerCAmelCase = initializer_range
_lowerCAmelCase = layer_norm_eps
_lowerCAmelCase = image_size
_lowerCAmelCase = patch_size
_lowerCAmelCase = num_channels
_lowerCAmelCase = qkv_bias
_lowerCAmelCase = encoder_stride
class lowerCAmelCase_ ( __magic_name__ ):
__lowerCamelCase : List[str] = version.parse("1.11" )
@property
def _snake_case ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
@property
def _snake_case ( self ) -> float:
return 1E-4
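def _demo_deit_onnx_inputs():
    # Minimal sketch, assuming the two classes above are DeiTConfig and
    # DeiTOnnxConfig from upstream `transformers`.
    from transformers.models.deit.configuration_deit import DeiTConfig, DeiTOnnxConfig

    onnx_config = DeiTOnnxConfig(DeiTConfig() )
    return dict(onnx_config.inputs )  # {"pixel_values": {0: "batch", 1: "num_channels", 2: "height", 3: "width"}}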
| 18 | 0 |
from typing import Optional
from urllib.parse import quote
import huggingface_hub as hfh
from packaging import version
def UpperCAmelCase__( repo_id : str , path : str , revision : Optional[str] = None ):
    if version.parse(hfh.__version__ ).release < version.parse('0.11.0' ).release:
        # old versions of hfh don't url-encode the file path
        path = quote(path )
    return hfh.hf_hub_url(repo_id , path , repo_type='dataset' , revision=revision )
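def _demo_hf_hub_url():
    # Usage sketch; the repo and file names are made up. With repo_type
    # 'dataset' the result looks like
    # https://huggingface.co/datasets/user/my_dataset/resolve/main/data/train.csv
    return UpperCAmelCase__('user/my_dataset' , 'data/train.csv' , revision='main' )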
| 576 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_SCREAMING_SNAKE_CASE = {
"configuration_mctct": ["MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MCTCTConfig"],
"feature_extraction_mctct": ["MCTCTFeatureExtractor"],
"processing_mctct": ["MCTCTProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = [
"MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST",
"MCTCTForCTC",
"MCTCTModel",
"MCTCTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
from .feature_extraction_mctct import MCTCTFeatureExtractor
from .processing_mctct import MCTCTProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
import sys
_SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
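# Usage note: importing this package stays cheap; the torch-backed classes in
# `_import_structure` are resolved lazily by `_LazyModule` on first attribute
# access, and are dropped from the public surface when torch is unavailable.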
| 18 | 0 |
'''simple docstring'''
import re
import string
import numpy as np
import datasets
UpperCAmelCase_ : List[str] = '\nReturns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.\n'
UpperCAmelCase_ : List[str] = '\nArgs:\n    predictions: List of predicted texts.\n    references: List of reference texts.\n    regexes_to_ignore: List, defaults to None. Regex expressions of characters to\n        ignore when calculating the exact matches. Note: these regexes are removed\n        from the input data before the changes based on the options below (e.g. ignore_case,\n        ignore_punctuation, ignore_numbers) are applied.\n    ignore_case: Boolean, defaults to False. If true, turns everything\n        to lowercase so that capitalization differences are ignored.\n    ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before\n        comparing predictions and references.\n    ignore_numbers: Boolean, defaults to False. If true, removes all numbers before\n        comparing predictions and references.\nReturns:\n    exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.\nExamples:\n    >>> exact_match = datasets.load_metric(\"exact_match\")\n    >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n    >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n    >>> results = exact_match.compute(references=refs, predictions=preds)\n    >>> print(round(results[\"exact_match\"], 1))\n    25.0\n\n    >>> exact_match = datasets.load_metric(\"exact_match\")\n    >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n    >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\"], ignore_case=True, ignore_punctuation=True)\n    >>> print(round(results[\"exact_match\"], 1))\n    50.0\n\n\n    >>> exact_match = datasets.load_metric(\"exact_match\")\n    >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n    >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True)\n    >>> print(round(results[\"exact_match\"], 1))\n    75.0\n\n    >>> exact_match = datasets.load_metric(\"exact_match\")\n    >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n    >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)\n    >>> print(round(results[\"exact_match\"], 1))\n    100.0\n\n    >>> exact_match = datasets.load_metric(\"exact_match\")\n    >>> refs = [\"The cat sat on the mat.\", \"Theaters are great.\", \"It\'s like comparing oranges and apples.\"]\n    >>> preds = [\"The cat sat on the mat?\", \"Theaters are great.\", \"It\'s like comparing apples and oranges.\"]\n    >>> results = exact_match.compute(references=refs, predictions=preds)\n    >>> print(round(results[\"exact_match\"], 1))\n    33.3\n\n'
UpperCAmelCase_ : Any = '\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowercase__ ( datasets.Metric ):
'''simple docstring'''
def UpperCAmelCase_ ( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Value("""string""" , id="""sequence""" ),
} ) , reference_urls=[] , )
    def UpperCAmelCase_ ( self , predictions , references , regexes_to_ignore=None , ignore_case=False , ignore_punctuation=False , ignore_numbers=False , ):
        if regexes_to_ignore is not None:
            for s in regexes_to_ignore:
                predictions = np.array([re.sub(s , """""" , x ) for x in predictions] )
                references = np.array([re.sub(s , """""" , x ) for x in references] )
        else:
            predictions = np.asarray(predictions )
            references = np.asarray(references )
        if ignore_case:
            predictions = np.char.lower(predictions )
            references = np.char.lower(references )
        if ignore_punctuation:
            repl_table = string.punctuation.maketrans("""""" , """""" , string.punctuation )
            predictions = np.char.translate(predictions , table=repl_table )
            references = np.char.translate(references , table=repl_table )
        if ignore_numbers:
            repl_table = string.digits.maketrans("""""" , """""" , string.digits )
            predictions = np.char.translate(predictions , table=repl_table )
            references = np.char.translate(references , table=repl_table )
        score_list = predictions == references
        return {"exact_match": np.mean(score_list ) * 100}
| 533 |
'''simple docstring'''
from typing import Dict, Iterable, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
class lowerCAmelCase_ ( __magic_name__ ):
__lowerCamelCase : int = ["pixel_values"]
def __init__( self , _lowerCAmelCase = True , _lowerCAmelCase = None , _lowerCAmelCase = PILImageResampling.BICUBIC , _lowerCAmelCase = True , _lowerCAmelCase = None , _lowerCAmelCase = True , _lowerCAmelCase = 1 / 255 , _lowerCAmelCase = True , _lowerCAmelCase = IMAGENET_DEFAULT_MEAN , _lowerCAmelCase = IMAGENET_DEFAULT_STD , **_lowerCAmelCase , ) -> None:
super().__init__(**_lowerCAmelCase )
_lowerCAmelCase = size if size is not None else {"shortest_edge": 224}
_lowerCAmelCase = get_size_dict(_lowerCAmelCase , default_to_square=_lowerCAmelCase )
_lowerCAmelCase = crop_size if crop_size is not None else {"height": 224, "width": 224}
_lowerCAmelCase = get_size_dict(_lowerCAmelCase , param_name="crop_size" )
_lowerCAmelCase = do_resize
_lowerCAmelCase = size
_lowerCAmelCase = resample
_lowerCAmelCase = do_center_crop
_lowerCAmelCase = crop_size
_lowerCAmelCase = do_rescale
_lowerCAmelCase = rescale_factor
_lowerCAmelCase = do_normalize
_lowerCAmelCase = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
_lowerCAmelCase = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = PILImageResampling.BICUBIC , _lowerCAmelCase = None , **_lowerCAmelCase , ) -> np.ndarray:
_lowerCAmelCase = get_size_dict(_lowerCAmelCase , default_to_square=_lowerCAmelCase )
# size_dict is a dict with either keys "height" and "width" or "shortest_edge"
if "shortest_edge" in size:
_lowerCAmelCase = int((256 / 224) * size["shortest_edge"] )
_lowerCAmelCase = get_resize_output_image_size(_lowerCAmelCase , size=_lowerCAmelCase , default_to_square=_lowerCAmelCase )
_lowerCAmelCase = {"height": output_size[0], "width": output_size[1]}
if "height" not in size_dict or "width" not in size_dict:
raise ValueError(
f'''Size dict must have keys \'height\' and \'width\' or \'shortest_edge\'. Got {size_dict.keys()}''' )
return resize(
_lowerCAmelCase , size=(size_dict["height"], size_dict["width"]) , resample=_lowerCAmelCase , data_format=_lowerCAmelCase , **_lowerCAmelCase )
def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = None , **_lowerCAmelCase , ) -> np.ndarray:
_lowerCAmelCase = get_size_dict(_lowerCAmelCase )
if "height" not in size or "width" not in size:
raise ValueError(f'''Size dict must have keys \'height\' and \'width\'. Got {size.keys()}''' )
return center_crop(_lowerCAmelCase , size=(size["height"], size["width"]) , data_format=_lowerCAmelCase , **_lowerCAmelCase )
def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = None , **_lowerCAmelCase , ) -> np.ndarray:
return rescale(_lowerCAmelCase , scale=_lowerCAmelCase , data_format=_lowerCAmelCase , **_lowerCAmelCase )
def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = None , **_lowerCAmelCase , ) -> np.ndarray:
return normalize(_lowerCAmelCase , mean=_lowerCAmelCase , std=_lowerCAmelCase , data_format=_lowerCAmelCase , **_lowerCAmelCase )
def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = ChannelDimension.FIRST , **_lowerCAmelCase , ) -> BatchFeature:
_lowerCAmelCase = do_resize if do_resize is not None else self.do_resize
_lowerCAmelCase = resample if resample is not None else self.resample
_lowerCAmelCase = do_center_crop if do_center_crop is not None else self.do_center_crop
_lowerCAmelCase = do_rescale if do_rescale is not None else self.do_rescale
_lowerCAmelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
_lowerCAmelCase = do_normalize if do_normalize is not None else self.do_normalize
_lowerCAmelCase = image_mean if image_mean is not None else self.image_mean
_lowerCAmelCase = image_std if image_std is not None else self.image_std
_lowerCAmelCase = size if size is not None else self.size
_lowerCAmelCase = get_size_dict(_lowerCAmelCase , default_to_square=_lowerCAmelCase )
_lowerCAmelCase = crop_size if crop_size is not None else self.crop_size
_lowerCAmelCase = get_size_dict(_lowerCAmelCase , param_name="crop_size" )
_lowerCAmelCase = make_list_of_images(_lowerCAmelCase )
if not valid_images(_lowerCAmelCase ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None:
raise ValueError("Size must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# All transformations expect numpy arrays.
_lowerCAmelCase = [to_numpy_array(_lowerCAmelCase ) for image in images]
if do_resize:
_lowerCAmelCase = [self.resize(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) for image in images]
if do_center_crop:
_lowerCAmelCase = [self.center_crop(_lowerCAmelCase , _lowerCAmelCase ) for image in images]
if do_rescale:
_lowerCAmelCase = [self.rescale(_lowerCAmelCase , _lowerCAmelCase ) for image in images]
if do_normalize:
_lowerCAmelCase = [self.normalize(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) for image in images]
_lowerCAmelCase = [to_channel_dimension_format(_lowerCAmelCase , _lowerCAmelCase ) for image in images]
_lowerCAmelCase = {"pixel_values": images}
return BatchFeature(data=_lowerCAmelCase , tensor_type=_lowerCAmelCase )
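def _demo_shortest_edge_scaling():
    # Sketch of the resize rule above: a "shortest_edge" request is scaled by
    # 256 / 224 (LeViT-style) before the output size is computed.
    size = {"shortest_edge": 224}
    return int((256 / 224) * size["shortest_edge"] )  # 256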
| 18 | 0 |
"""simple docstring"""
import os
import tempfile
import unittest
from transformers import NezhaConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
)
from transformers.models.nezha.modeling_nezha import NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST
class __UpperCamelCase :
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__=13 , lowerCAmelCase__=7 , lowerCAmelCase__=True , lowerCAmelCase__=True , lowerCAmelCase__=True , lowerCAmelCase__=True , lowerCAmelCase__=99 , lowerCAmelCase__=32 , lowerCAmelCase__=5 , lowerCAmelCase__=4 , lowerCAmelCase__=37 , lowerCAmelCase__="gelu" , lowerCAmelCase__=0.1 , lowerCAmelCase__=0.1 , lowerCAmelCase__=128 , lowerCAmelCase__=32 , lowerCAmelCase__=16 , lowerCAmelCase__=2 , lowerCAmelCase__=0.02 , lowerCAmelCase__=3 , lowerCAmelCase__=4 , lowerCAmelCase__=None , ) -> str:
a : Union[str, Any] = parent
a : List[Any] = batch_size
a : Union[str, Any] = seq_length
a : Any = is_training
a : Optional[Any] = use_input_mask
a : Dict = use_token_type_ids
a : str = use_labels
a : List[str] = vocab_size
a : Any = hidden_size
a : str = num_hidden_layers
a : Dict = num_attention_heads
a : Any = intermediate_size
a : List[Any] = hidden_act
a : Optional[Any] = hidden_dropout_prob
a : List[Any] = attention_probs_dropout_prob
a : Tuple = max_position_embeddings
a : Union[str, Any] = type_vocab_size
a : str = type_sequence_label_size
a : Any = initializer_range
a : Tuple = num_labels
a : Any = num_choices
a : Optional[Any] = scope
def __a ( self ) -> Optional[int]:
a : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
a : str = None
if self.use_input_mask:
a : Any = random_attention_mask([self.batch_size, self.seq_length] )
a : Tuple = None
if self.use_token_type_ids:
a : Any = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
a : Optional[int] = None
a : Optional[int] = None
a : Dict = None
if self.use_labels:
a : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
a : Any = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
a : List[str] = ids_tensor([self.batch_size] , self.num_choices )
a : Dict = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __a ( self ) -> Dict:
return NezhaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_lowerCAmelCase , initializer_range=self.initializer_range , )
def __a ( self ) -> str:
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Dict:
a : Optional[int] = NezhaModel(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
a : List[str] = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase )
a : List[Any] = model(_lowerCAmelCase , token_type_ids=_lowerCAmelCase )
a : List[Any] = model(_lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , ) -> Optional[int]:
a : Dict = True
a : Union[str, Any] = NezhaModel(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
a : Dict = model(
_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase , encoder_hidden_states=_lowerCAmelCase , encoder_attention_mask=_lowerCAmelCase , )
a : Union[str, Any] = model(
_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase , encoder_hidden_states=_lowerCAmelCase , )
a : Union[str, Any] = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Any:
a : List[str] = NezhaForMaskedLM(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
a : Dict = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase , labels=_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> str:
a : str = NezhaForNextSentencePrediction(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
a : str = model(
_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase , labels=_lowerCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> str:
a : Optional[int] = NezhaForPreTraining(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
a : int = model(
_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase , labels=_lowerCAmelCase , next_sentence_label=_lowerCAmelCase , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> str:
a : Optional[Any] = NezhaForQuestionAnswering(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
a : str = model(
_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase , start_positions=_lowerCAmelCase , end_positions=_lowerCAmelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Dict:
a : Tuple = self.num_labels
a : List[str] = NezhaForSequenceClassification(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
a : List[str] = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase , labels=_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> str:
a : str = self.num_labels
a : Optional[int] = NezhaForTokenClassification(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
a : str = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase , labels=_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Tuple:
a : int = self.num_choices
a : int = NezhaForMultipleChoice(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
a : str = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
a : List[str] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
a : int = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
a : str = model(
_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase , labels=_lowerCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __a ( self ) -> Tuple:
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
a : Tuple = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class __UpperCamelCase ( a__ , a__ , a__ , unittest.TestCase ):
lowerCamelCase : Dict =(
(
NezhaModel,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
)
if is_torch_available()
else ()
)
lowerCamelCase : str =(
{
"feature-extraction": NezhaModel,
"fill-mask": NezhaForMaskedLM,
"question-answering": NezhaForQuestionAnswering,
"text-classification": NezhaForSequenceClassification,
"token-classification": NezhaForTokenClassification,
"zero-shot": NezhaForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCamelCase : str =True
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=False ) -> Any:
a : Any = super()._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase , return_labels=_lowerCAmelCase )
if return_labels:
if model_class in get_values(_lowerCAmelCase ):
a : List[str] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=_lowerCAmelCase )
a : Union[str, Any] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_lowerCAmelCase )
return inputs_dict
def __a ( self ) -> str:
a : List[str] = NezhaModelTester(self )
a : int = ConfigTester(self , config_class=_lowerCAmelCase , hidden_size=37 )
def __a ( self ) -> Optional[Any]:
self.config_tester.run_common_tests()
def __a ( self ) -> List[str]:
a : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCAmelCase )
def __a ( self ) -> Dict:
a : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*_lowerCAmelCase )
def __a ( self ) -> str:
# This regression test was failing with PyTorch < 1.3
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
def __a ( self ) -> Dict:
a : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_lowerCAmelCase )
def __a ( self ) -> Optional[int]:
a : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*_lowerCAmelCase )
def __a ( self ) -> Union[str, Any]:
a : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_next_sequence_prediction(*_lowerCAmelCase )
def __a ( self ) -> Union[str, Any]:
a : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*_lowerCAmelCase )
def __a ( self ) -> Any:
a : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_lowerCAmelCase )
def __a ( self ) -> str:
a : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_lowerCAmelCase )
def __a ( self ) -> Dict:
a : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_lowerCAmelCase )
@slow
def __a ( self ) -> Optional[Any]:
for model_name in NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a : Any = NezhaModel.from_pretrained(_lowerCAmelCase )
self.assertIsNotNone(_lowerCAmelCase )
@slow
@require_torch_gpu
def __a ( self ) -> Union[str, Any]:
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# NezhaForMultipleChoice behaves incorrectly in JIT environments.
if model_class == NezhaForMultipleChoice:
return
a : Optional[int] = True
a : str = model_class(config=_lowerCAmelCase )
a : Optional[Any] = self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase )
a : Optional[int] = torch.jit.trace(
_lowerCAmelCase , (inputs_dict["input_ids"].to("cpu" ), inputs_dict["attention_mask"].to("cpu" )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(_lowerCAmelCase , os.path.join(_lowerCAmelCase , "bert.pt" ) )
a : Tuple = torch.jit.load(os.path.join(_lowerCAmelCase , "bert.pt" ) , map_location=_lowerCAmelCase )
loaded(inputs_dict["input_ids"].to(_lowerCAmelCase ) , inputs_dict["attention_mask"].to(_lowerCAmelCase ) )
@require_torch
class __UpperCamelCase ( unittest.TestCase ):
@slow
def __a ( self ) -> Tuple:
a : Dict = NezhaModel.from_pretrained("sijunhe/nezha-cn-base" )
a : Any = torch.tensor([[0, 1, 2, 3, 4, 5]] )
a : List[Any] = torch.tensor([[0, 1, 1, 1, 1, 1]] )
with torch.no_grad():
a : Optional[int] = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase )[0]
a : List[str] = torch.Size((1, 6, 768) )
self.assertEqual(output.shape , _lowerCAmelCase )
a : Tuple = torch.tensor([[[0.0_685, 0.2_441, 0.1_102], [0.0_600, 0.1_906, 0.1_349], [0.0_221, 0.0_819, 0.0_586]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , _lowerCAmelCase , atol=1E-4 ) )
@slow
def __a ( self ) -> Union[str, Any]:
a : str = NezhaForMaskedLM.from_pretrained("sijunhe/nezha-cn-base" )
a : Union[str, Any] = torch.tensor([[0, 1, 2, 3, 4, 5]] )
a : Any = torch.tensor([[1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
a : List[Any] = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase )[0]
a : Tuple = torch.Size((1, 6, 2_1128) )
self.assertEqual(output.shape , _lowerCAmelCase )
a : int = torch.tensor(
[[-2.7_939, -1.7_902, -2.2_189], [-2.8_585, -1.8_908, -2.3_723], [-2.6_499, -1.7_750, -2.2_558]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , _lowerCAmelCase , atol=1E-4 ) )
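# To run this suite from a `transformers` checkout (the upstream location of
# this file is assumed to be tests/models/nezha/test_modeling_nezha.py):
#   python -m pytest tests/models/nezha/test_modeling_nezha.py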
| 633 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {
"naver-clova-ix/donut-base": "https://huggingface.co/naver-clova-ix/donut-base/resolve/main/config.json",
# See all Donut models at https://huggingface.co/models?filter=donut-swin
}
class lowerCAmelCase_ ( __magic_name__ ):
__lowerCamelCase : Union[str, Any] = "donut-swin"
__lowerCamelCase : int = {
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__( self , _lowerCAmelCase=224 , _lowerCAmelCase=4 , _lowerCAmelCase=3 , _lowerCAmelCase=96 , _lowerCAmelCase=[2, 2, 6, 2] , _lowerCAmelCase=[3, 6, 12, 24] , _lowerCAmelCase=7 , _lowerCAmelCase=4.0 , _lowerCAmelCase=True , _lowerCAmelCase=0.0 , _lowerCAmelCase=0.0 , _lowerCAmelCase=0.1 , _lowerCAmelCase="gelu" , _lowerCAmelCase=False , _lowerCAmelCase=0.02 , _lowerCAmelCase=1E-5 , **_lowerCAmelCase , ) -> Optional[Any]:
super().__init__(**_lowerCAmelCase )
_lowerCAmelCase = image_size
_lowerCAmelCase = patch_size
_lowerCAmelCase = num_channels
_lowerCAmelCase = embed_dim
_lowerCAmelCase = depths
_lowerCAmelCase = len(_lowerCAmelCase )
_lowerCAmelCase = num_heads
_lowerCAmelCase = window_size
_lowerCAmelCase = mlp_ratio
_lowerCAmelCase = qkv_bias
_lowerCAmelCase = hidden_dropout_prob
_lowerCAmelCase = attention_probs_dropout_prob
_lowerCAmelCase = drop_path_rate
_lowerCAmelCase = hidden_act
_lowerCAmelCase = use_absolute_embeddings
_lowerCAmelCase = layer_norm_eps
_lowerCAmelCase = initializer_range
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
_lowerCAmelCase = int(embed_dim * 2 ** (len(_lowerCAmelCase ) - 1) )
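def _demo_donut_swin_config():
    # Minimal sketch, assuming the class above is DonutSwinConfig from upstream
    # `transformers`; hidden_size is derived as embed_dim * 2 ** (num_layers - 1).
    from transformers import DonutSwinConfig

    cfg = DonutSwinConfig()
    return cfg.hidden_size  # 96 * 2 ** 3 == 768 with the defaults above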
| 18 | 0 |
'''simple docstring'''
from __future__ import annotations
class A__ :
    def __init__( self , data ) -> None:
        '''simple docstring'''
        self.data = data
        self.left = None
        self.right = None
def display( tree : Node | None ) -> None:  # In Order traversal of the tree
    if tree:
        display(tree.left )
        print(tree.data )
        display(tree.right )
def depth_of_tree( tree : Node | None ) -> int:
    return 1 + max(depth_of_tree(tree.left ) ,depth_of_tree(tree.right ) ) if tree else 0
def is_full_binary_tree( tree : Node ) -> bool:
    if not tree:
        return True
    if tree.left and tree.right:
        return is_full_binary_tree(tree.left ) and is_full_binary_tree(tree.right )
    else:
        return not tree.left and not tree.right
def main() -> None:  # Main function for testing.
    # One possible test tree (the exact shape is assumed; it exercises both
    # the depth and the full-binary-tree checks below).
    tree = Node(1 )
    tree.left = Node(2 )
    tree.right = Node(3 )
    tree.left.left = Node(4 )
    tree.left.right = Node(5 )
    tree.left.right.left = Node(6 )
    tree.right.left = Node(7 )
    tree.right.left.left = Node(8 )
    tree.right.left.left.right = Node(9 )
    print(is_full_binary_tree(tree ) )
    print(depth_of_tree(tree ) )
    print("""Tree is: """ )
    display(tree )
if __name__ == "__main__":
main()
| 694 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {
"microsoft/swinv2-tiny-patch4-window8-256": (
"https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json"
),
}
class lowerCAmelCase_ ( __magic_name__ ):
__lowerCamelCase : Union[str, Any] = "swinv2"
__lowerCamelCase : int = {
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__( self , _lowerCAmelCase=224 , _lowerCAmelCase=4 , _lowerCAmelCase=3 , _lowerCAmelCase=96 , _lowerCAmelCase=[2, 2, 6, 2] , _lowerCAmelCase=[3, 6, 12, 24] , _lowerCAmelCase=7 , _lowerCAmelCase=4.0 , _lowerCAmelCase=True , _lowerCAmelCase=0.0 , _lowerCAmelCase=0.0 , _lowerCAmelCase=0.1 , _lowerCAmelCase="gelu" , _lowerCAmelCase=False , _lowerCAmelCase=0.02 , _lowerCAmelCase=1E-5 , _lowerCAmelCase=32 , **_lowerCAmelCase , ) -> Tuple:
super().__init__(**_lowerCAmelCase )
_lowerCAmelCase = image_size
_lowerCAmelCase = patch_size
_lowerCAmelCase = num_channels
_lowerCAmelCase = embed_dim
_lowerCAmelCase = depths
_lowerCAmelCase = len(_lowerCAmelCase )
_lowerCAmelCase = num_heads
_lowerCAmelCase = window_size
_lowerCAmelCase = mlp_ratio
_lowerCAmelCase = qkv_bias
_lowerCAmelCase = hidden_dropout_prob
_lowerCAmelCase = attention_probs_dropout_prob
_lowerCAmelCase = drop_path_rate
_lowerCAmelCase = hidden_act
_lowerCAmelCase = use_absolute_embeddings
_lowerCAmelCase = layer_norm_eps
_lowerCAmelCase = initializer_range
_lowerCAmelCase = encoder_stride
# we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
_lowerCAmelCase = int(embed_dim * 2 ** (len(_lowerCAmelCase ) - 1) )
_lowerCAmelCase = (0, 0, 0, 0)
| 18 | 0 |
"""simple docstring"""
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=True )
class snake_case ( TaskTemplate ):
SCREAMING_SNAKE_CASE_ : str = field(default="""language-modeling""", metadata={"""include_in_asdict_even_if_is_default""": True} )
SCREAMING_SNAKE_CASE_ : ClassVar[Features] = Features({"""text""": Value("""string""" )} )
SCREAMING_SNAKE_CASE_ : ClassVar[Features] = Features({} )
SCREAMING_SNAKE_CASE_ : str = "text"
@property
    def column_mapping( self ) -> Dict[str, str]:
'''simple docstring'''
return {self.text_column: "text"}
| 346 |
'''simple docstring'''
import gc
import unittest
import torch
from parameterized import parameterized
from diffusers import AutoencoderKL
from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class lowerCAmelCase_ ( __magic_name__ ,__magic_name__ ,unittest.TestCase ):
__lowerCamelCase : Optional[Any] = AutoencoderKL
__lowerCamelCase : List[Any] = "sample"
__lowerCamelCase : Tuple = 1e-2
@property
    def dummy_input( self ) -> Union[str, Any]:
_lowerCAmelCase = 4
_lowerCAmelCase = 3
_lowerCAmelCase = (32, 32)
_lowerCAmelCase = floats_tensor((batch_size, num_channels) + sizes ).to(_lowerCAmelCase )
return {"sample": image}
@property
def _snake_case ( self ) -> Any:
return (3, 32, 32)
@property
def _snake_case ( self ) -> List[Any]:
return (3, 32, 32)
    def prepare_init_args_and_inputs_for_common( self ) -> str:
_lowerCAmelCase = {
"block_out_channels": [32, 64],
"in_channels": 3,
"out_channels": 3,
"down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
"up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
"latent_channels": 4,
}
_lowerCAmelCase = self.dummy_input
return init_dict, inputs_dict
def _snake_case ( self ) -> Optional[int]:
pass
def _snake_case ( self ) -> Any:
pass
@unittest.skipIf(torch_device == "mps" , "Gradient checkpointing skipped on MPS" )
def _snake_case ( self ) -> str:
# enable deterministic behavior for gradient checkpointing
_lowerCAmelCase , _lowerCAmelCase = self.prepare_init_args_and_inputs_for_common()
_lowerCAmelCase = self.model_class(**_lowerCAmelCase )
model.to(_lowerCAmelCase )
assert not model.is_gradient_checkpointing and model.training
_lowerCAmelCase = model(**_lowerCAmelCase ).sample
# run the backwards pass on the model. For backwards pass, for simplicity purpose,
# we won't calculate the loss and rather backprop on out.sum()
model.zero_grad()
_lowerCAmelCase = torch.randn_like(_lowerCAmelCase )
_lowerCAmelCase = (out - labels).mean()
loss.backward()
# re-instantiate the model now enabling gradient checkpointing
_lowerCAmelCase = self.model_class(**_lowerCAmelCase )
# clone model
model_a.load_state_dict(model.state_dict() )
model_a.to(_lowerCAmelCase )
model_a.enable_gradient_checkpointing()
assert model_a.is_gradient_checkpointing and model_a.training
_lowerCAmelCase = model_a(**_lowerCAmelCase ).sample
# run the backwards pass on the model. For backwards pass, for simplicity purpose,
# we won't calculate the loss and rather backprop on out.sum()
model_a.zero_grad()
_lowerCAmelCase = (out_a - labels).mean()
loss_a.backward()
# compare the output and parameters gradients
self.assertTrue((loss - loss_a).abs() < 1E-5 )
_lowerCAmelCase = dict(model.named_parameters() )
_lowerCAmelCase = dict(model_a.named_parameters() )
for name, param in named_params.items():
self.assertTrue(torch_all_close(param.grad.data , named_params_a[name].grad.data , atol=5E-5 ) )
def _snake_case ( self ) -> Optional[Any]:
_lowerCAmelCase , _lowerCAmelCase = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy" , output_loading_info=_lowerCAmelCase )
self.assertIsNotNone(_lowerCAmelCase )
self.assertEqual(len(loading_info["missing_keys"] ) , 0 )
model.to(_lowerCAmelCase )
_lowerCAmelCase = model(**self.dummy_input )
assert image is not None, "Make sure output is not None"
def _snake_case ( self ) -> Dict:
_lowerCAmelCase = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy" )
_lowerCAmelCase = model.to(_lowerCAmelCase )
model.eval()
if torch_device == "mps":
_lowerCAmelCase = torch.manual_seed(0 )
else:
_lowerCAmelCase = torch.Generator(device=_lowerCAmelCase ).manual_seed(0 )
_lowerCAmelCase = torch.randn(
1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , )
_lowerCAmelCase = image.to(_lowerCAmelCase )
with torch.no_grad():
_lowerCAmelCase = model(_lowerCAmelCase , sample_posterior=_lowerCAmelCase , generator=_lowerCAmelCase ).sample
_lowerCAmelCase = output[0, -1, -3:, -3:].flatten().cpu()
# Since the VAE Gaussian prior's generator is seeded on the appropriate device,
# the expected output slices are not the same for CPU and GPU.
if torch_device == "mps":
_lowerCAmelCase = torch.tensor(
[
-4.0078E-01,
-3.8323E-04,
-1.2681E-01,
-1.1462E-01,
2.0095E-01,
1.0893E-01,
-8.8247E-02,
-3.0361E-01,
-9.8644E-03,
] )
elif torch_device == "cpu":
_lowerCAmelCase = torch.tensor(
[-0.1352, 0.0878, 0.0419, -0.0818, -0.1069, 0.0688, -0.1458, -0.4446, -0.0026] )
else:
_lowerCAmelCase = torch.tensor(
[-0.2421, 0.4642, 0.2507, -0.0438, 0.0682, 0.3160, -0.2018, -0.0727, 0.2485] )
self.assertTrue(torch_all_close(_lowerCAmelCase , _lowerCAmelCase , rtol=1E-2 ) )
@slow
class lowerCAmelCase_ ( unittest.TestCase ):
    def get_file_format( self , seed , shape ) -> Union[str, Any]:
        return f'''gaussian_noise_s={seed}_shape={'_'.join([str(s ) for s in shape] )}.npy'''
    def tearDown( self ) -> None:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def get_sd_image( self , seed=0 , shape=(4, 3, 512, 512) , fpaa=False ) -> Tuple:
        dtype = torch.float16 if fpaa else torch.float32
        image = torch.from_numpy(load_hf_numpy(self.get_file_format(seed , shape ) ) ).to(torch_device ).to(dtype )
        return image
    def get_sd_vae_model( self , model_id="CompVis/stable-diffusion-v1-4" , fpaa=False ) -> Tuple:
        revision = "fp16" if fpaa else None
        torch_dtype = torch.float16 if fpaa else torch.float32
        model = AutoencoderKL.from_pretrained(
            model_id , subfolder="vae" , torch_dtype=torch_dtype , revision=revision , )
        model.to(torch_device ).eval()
        return model
    def get_generator( self , seed=0 ) -> str:
        if torch_device == "mps":
            return torch.manual_seed(seed )
        return torch.Generator(device=torch_device ).manual_seed(seed )
@parameterized.expand(
[
# fmt: off
[33, [-0.1603, 0.9878, -0.0495, -0.0790, -0.2709, 0.8375, -0.2060, -0.0824], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]],
[47, [-0.2376, 0.1168, 0.1332, -0.4840, -0.2508, -0.0791, -0.0493, -0.4089], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]],
# fmt: on
] )
def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Dict:
_lowerCAmelCase = self.get_sd_vae_model()
_lowerCAmelCase = self.get_sd_image(_lowerCAmelCase )
_lowerCAmelCase = self.get_generator(_lowerCAmelCase )
with torch.no_grad():
_lowerCAmelCase = model(_lowerCAmelCase , generator=_lowerCAmelCase , sample_posterior=_lowerCAmelCase ).sample
assert sample.shape == image.shape
_lowerCAmelCase = sample[-1, -2:, -2:, :2].flatten().float().cpu()
_lowerCAmelCase = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice )
assert torch_all_close(_lowerCAmelCase , _lowerCAmelCase , atol=3E-3 )
@parameterized.expand(
[
# fmt: off
[33, [-0.0513, 0.0289, 1.3799, 0.2166, -0.2573, -0.0871, 0.5103, -0.0999]],
[47, [-0.4128, -0.1320, -0.3704, 0.1965, -0.4116, -0.2332, -0.3340, 0.2247]],
# fmt: on
] )
@require_torch_gpu
def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase ) -> Optional[Any]:
_lowerCAmelCase = self.get_sd_vae_model(fpaa=_lowerCAmelCase )
_lowerCAmelCase = self.get_sd_image(_lowerCAmelCase , fpaa=_lowerCAmelCase )
_lowerCAmelCase = self.get_generator(_lowerCAmelCase )
with torch.no_grad():
_lowerCAmelCase = model(_lowerCAmelCase , generator=_lowerCAmelCase , sample_posterior=_lowerCAmelCase ).sample
assert sample.shape == image.shape
_lowerCAmelCase = sample[-1, -2:, :2, -2:].flatten().float().cpu()
_lowerCAmelCase = torch.tensor(_lowerCAmelCase )
assert torch_all_close(_lowerCAmelCase , _lowerCAmelCase , atol=1E-2 )
@parameterized.expand(
[
# fmt: off
[33, [-0.1609, 0.9866, -0.0487, -0.0777, -0.2716, 0.8368, -0.2055, -0.0814], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]],
[47, [-0.2377, 0.1147, 0.1333, -0.4841, -0.2506, -0.0805, -0.0491, -0.4085], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]],
# fmt: on
] )
def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Tuple:
_lowerCAmelCase = self.get_sd_vae_model()
_lowerCAmelCase = self.get_sd_image(_lowerCAmelCase )
with torch.no_grad():
_lowerCAmelCase = model(_lowerCAmelCase ).sample
assert sample.shape == image.shape
_lowerCAmelCase = sample[-1, -2:, -2:, :2].flatten().float().cpu()
_lowerCAmelCase = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice )
assert torch_all_close(_lowerCAmelCase , _lowerCAmelCase , atol=3E-3 )
@parameterized.expand(
[
# fmt: off
[13, [-0.2051, -0.1803, -0.2311, -0.2114, -0.3292, -0.3574, -0.2953, -0.3323]],
[37, [-0.2632, -0.2625, -0.2199, -0.2741, -0.4539, -0.4990, -0.3720, -0.4925]],
# fmt: on
] )
@require_torch_gpu
def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase ) -> Optional[Any]:
_lowerCAmelCase = self.get_sd_vae_model()
_lowerCAmelCase = self.get_sd_image(_lowerCAmelCase , shape=(3, 4, 64, 64) )
with torch.no_grad():
_lowerCAmelCase = model.decode(_lowerCAmelCase ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
_lowerCAmelCase = sample[-1, -2:, :2, -2:].flatten().cpu()
_lowerCAmelCase = torch.tensor(_lowerCAmelCase )
assert torch_all_close(_lowerCAmelCase , _lowerCAmelCase , atol=1E-3 )
@parameterized.expand(
[
# fmt: off
[27, [-0.0369, 0.0207, -0.0776, -0.0682, -0.1747, -0.1930, -0.1465, -0.2039]],
[16, [-0.1628, -0.2134, -0.2747, -0.2642, -0.3774, -0.4404, -0.3687, -0.4277]],
# fmt: on
] )
@require_torch_gpu
def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase ) -> str:
_lowerCAmelCase = self.get_sd_vae_model(fpaa=_lowerCAmelCase )
_lowerCAmelCase = self.get_sd_image(_lowerCAmelCase , shape=(3, 4, 64, 64) , fpaa=_lowerCAmelCase )
with torch.no_grad():
_lowerCAmelCase = model.decode(_lowerCAmelCase ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
_lowerCAmelCase = sample[-1, -2:, :2, -2:].flatten().float().cpu()
_lowerCAmelCase = torch.tensor(_lowerCAmelCase )
assert torch_all_close(_lowerCAmelCase , _lowerCAmelCase , atol=5E-3 )
@parameterized.expand([(13,), (16,), (27,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason="xformers is not required when using PyTorch 2.0." )
def _snake_case ( self , _lowerCAmelCase ) -> List[str]:
_lowerCAmelCase = self.get_sd_vae_model(fpaa=_lowerCAmelCase )
_lowerCAmelCase = self.get_sd_image(_lowerCAmelCase , shape=(3, 4, 64, 64) , fpaa=_lowerCAmelCase )
with torch.no_grad():
_lowerCAmelCase = model.decode(_lowerCAmelCase ).sample
model.enable_xformers_memory_efficient_attention()
with torch.no_grad():
_lowerCAmelCase = model.decode(_lowerCAmelCase ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
assert torch_all_close(_lowerCAmelCase , _lowerCAmelCase , atol=1E-1 )
@parameterized.expand([(13,), (16,), (37,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason="xformers is not required when using PyTorch 2.0." )
def _snake_case ( self , _lowerCAmelCase ) -> Any:
_lowerCAmelCase = self.get_sd_vae_model()
_lowerCAmelCase = self.get_sd_image(_lowerCAmelCase , shape=(3, 4, 64, 64) )
with torch.no_grad():
_lowerCAmelCase = model.decode(_lowerCAmelCase ).sample
model.enable_xformers_memory_efficient_attention()
with torch.no_grad():
_lowerCAmelCase = model.decode(_lowerCAmelCase ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
assert torch_all_close(_lowerCAmelCase , _lowerCAmelCase , atol=1E-2 )
@parameterized.expand(
[
# fmt: off
[33, [-0.3001, 0.0918, -2.6984, -3.9720, -3.2099, -5.0353, 1.7338, -0.2065, 3.4267]],
[47, [-1.5030, -4.3871, -6.0355, -9.1157, -1.6661, -2.7853, 2.1607, -5.0823, 2.5633]],
# fmt: on
] )
def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase ) -> int:
_lowerCAmelCase = self.get_sd_vae_model()
_lowerCAmelCase = self.get_sd_image(_lowerCAmelCase )
_lowerCAmelCase = self.get_generator(_lowerCAmelCase )
with torch.no_grad():
_lowerCAmelCase = model.encode(_lowerCAmelCase ).latent_dist
_lowerCAmelCase = dist.sample(generator=_lowerCAmelCase )
assert list(sample.shape ) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]]
_lowerCAmelCase = sample[0, -1, -3:, -3:].flatten().cpu()
_lowerCAmelCase = torch.tensor(_lowerCAmelCase )
_lowerCAmelCase = 3E-3 if torch_device != "mps" else 1E-2
assert torch_all_close(_lowerCAmelCase , _lowerCAmelCase , atol=_lowerCAmelCase )
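def _demo_vae_roundtrip():
    # Small CPU sketch of an encode/decode round trip with the dummy VAE the
    # tests above already use; the reconstruction keeps the input shape.
    model = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy" )
    sample = torch.randn(1 , 3 , 32 , 32 )
    with torch.no_grad():
        latents = model.encode(sample ).latent_dist.mode()
        recon = model.decode(latents ).sample
    return recon.shape  # torch.Size([1, 3, 32, 32])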
| 18 | 0 |
import unittest

from diffusers.pipelines.pipeline_utils import is_safetensors_compatible


class IsSafetensorsCompatibleTests(unittest.TestCase):
    def test_all_is_compatible(self):
        filenames = [
            'safety_checker/pytorch_model.bin',
            'safety_checker/model.safetensors',
            'vae/diffusion_pytorch_model.bin',
            'vae/diffusion_pytorch_model.safetensors',
            'text_encoder/pytorch_model.bin',
            'text_encoder/model.safetensors',
            'unet/diffusion_pytorch_model.bin',
            'unet/diffusion_pytorch_model.safetensors',
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_diffusers_model_is_compatible(self):
        filenames = [
            'unet/diffusion_pytorch_model.bin',
            'unet/diffusion_pytorch_model.safetensors',
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_diffusers_model_is_not_compatible(self):
        # the safetensors counterpart of the unet weights is missing
        filenames = [
            'safety_checker/pytorch_model.bin',
            'safety_checker/model.safetensors',
            'vae/diffusion_pytorch_model.bin',
            'vae/diffusion_pytorch_model.safetensors',
            'text_encoder/pytorch_model.bin',
            'text_encoder/model.safetensors',
            'unet/diffusion_pytorch_model.bin',
            # Removed: 'unet/diffusion_pytorch_model.safetensors',
        ]
        self.assertFalse(is_safetensors_compatible(filenames))

    def test_transformer_model_is_compatible(self):
        filenames = [
            'text_encoder/pytorch_model.bin',
            'text_encoder/model.safetensors',
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_transformer_model_is_not_compatible(self):
        filenames = [
            'safety_checker/pytorch_model.bin',
            'safety_checker/model.safetensors',
            'vae/diffusion_pytorch_model.bin',
            'vae/diffusion_pytorch_model.safetensors',
            'text_encoder/pytorch_model.bin',
            # Removed: 'text_encoder/model.safetensors',
            'unet/diffusion_pytorch_model.bin',
            'unet/diffusion_pytorch_model.safetensors',
        ]
        self.assertFalse(is_safetensors_compatible(filenames))

    def test_all_is_compatible_variant(self):
        filenames = [
            'safety_checker/pytorch_model.fp16.bin',
            'safety_checker/model.fp16.safetensors',
            'vae/diffusion_pytorch_model.fp16.bin',
            'vae/diffusion_pytorch_model.fp16.safetensors',
            'text_encoder/pytorch_model.fp16.bin',
            'text_encoder/model.fp16.safetensors',
            'unet/diffusion_pytorch_model.fp16.bin',
            'unet/diffusion_pytorch_model.fp16.safetensors',
        ]
        variant = 'fp16'
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_compatible_variant(self):
        filenames = [
            'unet/diffusion_pytorch_model.fp16.bin',
            'unet/diffusion_pytorch_model.fp16.safetensors',
        ]
        variant = 'fp16'
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_compatible_variant_partial(self):
        # the non-variant safetensors files still make the repo compatible
        filenames = [
            'unet/diffusion_pytorch_model.bin',
            'unet/diffusion_pytorch_model.safetensors',
        ]
        variant = 'fp16'
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_not_compatible_variant(self):
        filenames = [
            'safety_checker/pytorch_model.fp16.bin',
            'safety_checker/model.fp16.safetensors',
            'vae/diffusion_pytorch_model.fp16.bin',
            'vae/diffusion_pytorch_model.fp16.safetensors',
            'text_encoder/pytorch_model.fp16.bin',
            'text_encoder/model.fp16.safetensors',
            'unet/diffusion_pytorch_model.fp16.bin',
            # Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
        ]
        variant = 'fp16'
        self.assertFalse(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_compatible_variant(self):
        filenames = [
            'text_encoder/pytorch_model.fp16.bin',
            'text_encoder/model.fp16.safetensors',
        ]
        variant = 'fp16'
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_compatible_without_variant(self):
        filenames = [
            'text_encoder/pytorch_model.bin',
            'text_encoder/model.safetensors',
        ]
        variant = 'fp16'
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_not_compatible_variant(self):
        filenames = [
            'safety_checker/pytorch_model.fp16.bin',
            'safety_checker/model.fp16.safetensors',
            'vae/diffusion_pytorch_model.fp16.bin',
            'vae/diffusion_pytorch_model.fp16.safetensors',
            'text_encoder/pytorch_model.fp16.bin',
            # Removed: 'text_encoder/model.fp16.safetensors',
            'unet/diffusion_pytorch_model.fp16.bin',
            'unet/diffusion_pytorch_model.fp16.safetensors',
        ]
        variant = 'fp16'
        self.assertFalse(is_safetensors_compatible(filenames, variant=variant))
| 148 |
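A rough reimplementation sketch of the rule those tests encode: every torch `.bin` weight needs a `.safetensors` counterpart, with variant-suffixed names falling back to the plain ones. This is a simplification written for illustration, not the real diffusers helper:

import os

def is_safetensors_compatible_sketch(filenames, variant=None):
    safetensors = {f for f in filenames if f.endswith(".safetensors")}
    for f in filenames:
        if not f.endswith(".bin"):
            continue
        folder, name = os.path.split(f[: -len(".bin")])
        if name.startswith("pytorch_model"):
            # transformers components pair pytorch_model.bin with model.safetensors
            name = "model" + name[len("pytorch_model"):]
        candidate = folder + "/" + name + ".safetensors"
        fallback = candidate.replace(f".{variant}", "") if variant else candidate
        if candidate not in safetensors and fallback not in safetensors:
            return False
    return True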
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "bigcode/gpt_bigcode-santacoder": "https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json",
}


class GPTBigCodeConfig(PretrainedConfig):
    model_type = "gpt_bigcode"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=50257,
        n_positions=1024,
        n_embd=768,
        n_layer=12,
        n_head=12,
        n_inner=None,
        activation_function="gelu_pytorch_tanh",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        attention_softmax_in_fp32=True,
        scale_attention_softmax_in_fp32=True,
        multi_query=True,
        **kwargs,
    ) -> None:
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.attention_softmax_in_fp32 = attention_softmax_in_fp32
        self.scale_attention_softmax_in_fp32 = scale_attention_softmax_in_fp32
        self.multi_query = multi_query
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
| 18 | 0 |
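A quick usage sketch for the config above; `GPTBigCodeForCausalLM` is the standard transformers model class and is assumed available in the installed version:

from transformers import GPTBigCodeConfig, GPTBigCodeForCausalLM

config = GPTBigCodeConfig(n_layer=2, n_head=4, n_embd=128, multi_query=True)
model = GPTBigCodeForCausalLM(config)
# attribute_map lets generic code read `hidden_size` although the field is `n_embd`
assert config.hidden_size == config.n_embd == 128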
'''simple docstring'''
from __future__ import annotations
from typing import Dict
from ...configuration_utils import PretrainedConfig
ERNIE_M_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'susnato/ernie-m-base_pytorch': 'https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json',
    'susnato/ernie-m-large_pytorch': 'https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json',
}


class ErnieMConfig(PretrainedConfig):
    model_type = 'ernie_m'
    attribute_map: Dict[str, str] = {'dropout': 'classifier_dropout', 'num_classes': 'num_labels'}

    def __init__(
        self,
        vocab_size: int = 250002,
        hidden_size: int = 768,
        num_hidden_layers: int = 12,
        num_attention_heads: int = 12,
        intermediate_size: int = 3072,
        hidden_act: str = 'gelu',
        hidden_dropout_prob: float = 0.1,
        attention_probs_dropout_prob: float = 0.1,
        max_position_embeddings: int = 514,
        initializer_range: float = 0.02,
        pad_token_id: int = 1,
        layer_norm_eps: float = 1e-05,
        classifier_dropout=None,
        is_decoder=False,
        act_dropout=0.0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.is_decoder = is_decoder
        self.act_dropout = act_dropout
| 50 |
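The `attribute_map` on this config aliases attribute names in both directions; a small sketch of that behavior, relying on `PretrainedConfig`'s attribute mapping (which, as far as I know, applies on both reads and writes):

from transformers import ErnieMConfig

config = ErnieMConfig(classifier_dropout=0.3)
assert config.dropout == 0.3  # `dropout` reads through to classifier_dropout
config.num_classes = 5        # `num_classes` writes through to num_labels
assert config.num_labels == 5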
'''simple docstring'''
import math
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/data2vec-base-960h": "https://huggingface.co/facebook/data2vec-audio-base-960h/resolve/main/config.json",
    # See all Data2VecAudio models at https://huggingface.co/models?filter=data2vec-audio
}


class Data2VecAudioConfig(PretrainedConfig):
    model_type = "data2vec-audio"

    def __init__(
        self,
        vocab_size=32, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
        intermediate_size=3072, hidden_act="gelu", hidden_dropout=0.1, activation_dropout=0.1,
        attention_dropout=0.1, feat_proj_dropout=0.0, final_dropout=0.1, layerdrop=0.1,
        initializer_range=0.02, layer_norm_eps=1e-5, feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512), conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2), conv_bias=False,
        num_conv_pos_embedding_groups=16, conv_pos_kernel_size=19, num_conv_pos_embeddings=5,
        mask_time_prob=0.05, mask_time_length=10, mask_time_min_masks=2,
        mask_feature_prob=0.0, mask_feature_length=10, mask_feature_min_masks=0,
        ctc_loss_reduction="sum", ctc_zero_infinity=False, use_weighted_layer_sum=False,
        classifier_proj_size=256, tdnn_dim=(512, 512, 512, 512, 1500),
        tdnn_kernel=(5, 3, 3, 1, 1), tdnn_dilation=(1, 2, 3, 1, 1), xvector_output_dim=512,
        pad_token_id=0, bos_token_id=1, eos_token_id=2,
        add_adapter=False, adapter_kernel_size=3, adapter_stride=2, num_adapter_layers=3,
        output_hidden_size=None,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.conv_pos_kernel_size = conv_pos_kernel_size
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.use_weighted_layer_sum = use_weighted_layer_sum

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def inputs_to_logits_ratio(self):
        return math.prod(self.conv_stride)
| 18 | 0 |
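The conv feature extractor above downsamples the waveform by the product of the strides, which is exactly what the `inputs_to_logits_ratio` property computes; a short check:

import math

conv_stride = (5, 2, 2, 2, 2, 2, 2)
ratio = math.prod(conv_stride)  # 5 * 2**6 = 320
assert ratio == 320
# at a 16 kHz sampling rate, one output frame therefore covers 320 samples = 20 ms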
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LILT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'SCUT-DLVCLab/lilt-roberta-en-base': (
        'https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json'
    ),
}


class LiltConfig(PretrainedConfig):
    model_type = "lilt"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        classifier_dropout=None,
        channel_shrink_ratio=4,
        max_2d_position_embeddings=1024,
        **kwargs,
    ) -> None:
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        self.channel_shrink_ratio = channel_shrink_ratio
        self.max_2d_position_embeddings = max_2d_position_embeddings
| 353 |
'''simple docstring'''
import torch
from diffusers import DDPMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class lowerCAmelCase_ ( __magic_name__ ):
__lowerCamelCase : Any = (DDPMParallelScheduler,)
def _snake_case ( self , **_lowerCAmelCase ) -> int:
_lowerCAmelCase = {
"num_train_timesteps": 1000,
"beta_start": 0.0001,
"beta_end": 0.02,
"beta_schedule": "linear",
"variance_type": "fixed_small",
"clip_sample": True,
}
config.update(**_lowerCAmelCase )
return config
def _snake_case ( self ) -> List[Any]:
for timesteps in [1, 5, 100, 1000]:
self.check_over_configs(num_train_timesteps=_lowerCAmelCase )
def _snake_case ( self ) -> List[Any]:
for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=_lowerCAmelCase , beta_end=_lowerCAmelCase )
def _snake_case ( self ) -> Any:
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=_lowerCAmelCase )
def _snake_case ( self ) -> Optional[Any]:
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=_lowerCAmelCase )
def _snake_case ( self ) -> Optional[int]:
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=_lowerCAmelCase )
def _snake_case ( self ) -> List[str]:
self.check_over_configs(thresholding=_lowerCAmelCase )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=_lowerCAmelCase , prediction_type=_lowerCAmelCase , sample_max_value=_lowerCAmelCase , )
def _snake_case ( self ) -> int:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=_lowerCAmelCase )
def _snake_case ( self ) -> Dict:
for t in [0, 500, 999]:
self.check_over_forward(time_step=_lowerCAmelCase )
def _snake_case ( self ) -> Union[str, Any]:
_lowerCAmelCase = self.scheduler_classes[0]
_lowerCAmelCase = self.get_scheduler_config()
_lowerCAmelCase = scheduler_class(**_lowerCAmelCase )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.00979 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.02 ) ) < 1E-5
def _snake_case ( self ) -> Tuple:
_lowerCAmelCase = self.scheduler_classes[0]
_lowerCAmelCase = self.get_scheduler_config()
_lowerCAmelCase = scheduler_class(**_lowerCAmelCase )
_lowerCAmelCase = len(_lowerCAmelCase )
_lowerCAmelCase = self.dummy_model()
_lowerCAmelCase = self.dummy_sample_deter
_lowerCAmelCase = self.dummy_sample_deter + 0.1
_lowerCAmelCase = self.dummy_sample_deter - 0.1
_lowerCAmelCase = samplea.shape[0]
_lowerCAmelCase = torch.stack([samplea, samplea, samplea] , dim=0 )
_lowerCAmelCase = torch.arange(_lowerCAmelCase )[0:3, None].repeat(1 , _lowerCAmelCase )
_lowerCAmelCase = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) )
_lowerCAmelCase = scheduler.batch_step_no_noise(_lowerCAmelCase , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) )
_lowerCAmelCase = torch.sum(torch.abs(_lowerCAmelCase ) )
_lowerCAmelCase = torch.mean(torch.abs(_lowerCAmelCase ) )
assert abs(result_sum.item() - 1153.1833 ) < 1E-2
assert abs(result_mean.item() - 0.5005 ) < 1E-3
def _snake_case ( self ) -> Dict:
_lowerCAmelCase = self.scheduler_classes[0]
_lowerCAmelCase = self.get_scheduler_config()
_lowerCAmelCase = scheduler_class(**_lowerCAmelCase )
_lowerCAmelCase = len(_lowerCAmelCase )
_lowerCAmelCase = self.dummy_model()
_lowerCAmelCase = self.dummy_sample_deter
_lowerCAmelCase = torch.manual_seed(0 )
for t in reversed(range(_lowerCAmelCase ) ):
# 1. predict noise residual
_lowerCAmelCase = model(_lowerCAmelCase , _lowerCAmelCase )
# 2. predict previous mean of sample x_t-1
_lowerCAmelCase = scheduler.step(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , generator=_lowerCAmelCase ).prev_sample
_lowerCAmelCase = pred_prev_sample
_lowerCAmelCase = torch.sum(torch.abs(_lowerCAmelCase ) )
_lowerCAmelCase = torch.mean(torch.abs(_lowerCAmelCase ) )
assert abs(result_sum.item() - 258.9606 ) < 1E-2
assert abs(result_mean.item() - 0.3372 ) < 1E-3
def _snake_case ( self ) -> Optional[Any]:
_lowerCAmelCase = self.scheduler_classes[0]
_lowerCAmelCase = self.get_scheduler_config(prediction_type="v_prediction" )
_lowerCAmelCase = scheduler_class(**_lowerCAmelCase )
_lowerCAmelCase = len(_lowerCAmelCase )
_lowerCAmelCase = self.dummy_model()
_lowerCAmelCase = self.dummy_sample_deter
_lowerCAmelCase = torch.manual_seed(0 )
for t in reversed(range(_lowerCAmelCase ) ):
# 1. predict noise residual
_lowerCAmelCase = model(_lowerCAmelCase , _lowerCAmelCase )
# 2. predict previous mean of sample x_t-1
_lowerCAmelCase = scheduler.step(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , generator=_lowerCAmelCase ).prev_sample
_lowerCAmelCase = pred_prev_sample
_lowerCAmelCase = torch.sum(torch.abs(_lowerCAmelCase ) )
_lowerCAmelCase = torch.mean(torch.abs(_lowerCAmelCase ) )
assert abs(result_sum.item() - 202.0296 ) < 1E-2
assert abs(result_mean.item() - 0.2631 ) < 1E-3
def _snake_case ( self ) -> Dict:
_lowerCAmelCase = self.scheduler_classes[0]
_lowerCAmelCase = self.get_scheduler_config()
_lowerCAmelCase = scheduler_class(**_lowerCAmelCase )
_lowerCAmelCase = [100, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=_lowerCAmelCase )
_lowerCAmelCase = scheduler.timesteps
for i, timestep in enumerate(_lowerCAmelCase ):
if i == len(_lowerCAmelCase ) - 1:
_lowerCAmelCase = -1
else:
_lowerCAmelCase = timesteps[i + 1]
_lowerCAmelCase = scheduler.previous_timestep(_lowerCAmelCase )
_lowerCAmelCase = prev_t.item()
self.assertEqual(_lowerCAmelCase , _lowerCAmelCase )
def _snake_case ( self ) -> Any:
_lowerCAmelCase = self.scheduler_classes[0]
_lowerCAmelCase = self.get_scheduler_config()
_lowerCAmelCase = scheduler_class(**_lowerCAmelCase )
_lowerCAmelCase = [100, 87, 50, 51, 0]
with self.assertRaises(_lowerCAmelCase , msg="`custom_timesteps` must be in descending order." ):
scheduler.set_timesteps(timesteps=_lowerCAmelCase )
def _snake_case ( self ) -> Union[str, Any]:
_lowerCAmelCase = self.scheduler_classes[0]
_lowerCAmelCase = self.get_scheduler_config()
_lowerCAmelCase = scheduler_class(**_lowerCAmelCase )
_lowerCAmelCase = [100, 87, 50, 1, 0]
_lowerCAmelCase = len(_lowerCAmelCase )
with self.assertRaises(_lowerCAmelCase , msg="Can only pass one of `num_inference_steps` or `custom_timesteps`." ):
scheduler.set_timesteps(num_inference_steps=_lowerCAmelCase , timesteps=_lowerCAmelCase )
def _snake_case ( self ) -> Optional[int]:
_lowerCAmelCase = self.scheduler_classes[0]
_lowerCAmelCase = self.get_scheduler_config()
_lowerCAmelCase = scheduler_class(**_lowerCAmelCase )
_lowerCAmelCase = [scheduler.config.num_train_timesteps]
        with self.assertRaises(
            ValueError,
            msg=f"`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}",
        ):
            scheduler.set_timesteps(timesteps=_lowerCAmelCase )
| 18 | 0 |
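A minimal sampling loop of the kind those scheduler tests exercise; the epsilon predictor here is a dummy stand-in, not a real network:

import torch
from diffusers import DDPMParallelScheduler

scheduler = DDPMParallelScheduler(num_train_timesteps=1000, beta_schedule="linear")
scheduler.set_timesteps(num_inference_steps=50)
sample = torch.randn(1, 3, 8, 8)

def model(x, t):  # dummy noise predictor
    return torch.zeros_like(x)

for t in scheduler.timesteps:
    residual = model(sample, t)
    sample = scheduler.step(residual, t, sample).prev_sample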
"""simple docstring"""
def bubble_sort(list_data: list, length: int = 0) -> list:
    """Recursive bubble sort with an early exit on a pass without swaps.

    >>> bubble_sort([5, 1, 4, 2, 8])
    [1, 2, 4, 5, 8]
    >>> bubble_sort([])
    []
    """
    length = length or len(list_data)
    swapped = False
    for i in range(length - 1):
        if list_data[i] > list_data[i + 1]:
            list_data[i], list_data[i + 1] = list_data[i + 1], list_data[i]
            swapped = True
    return list_data if not swapped else bubble_sort(list_data, length - 1)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 528 |
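A quick note on the recursion above: each call bubbles the largest remaining element into position `length - 1` and then recurses on the prefix, so the worst case is O(n^2) while the `not swapped` early exit makes an already sorted list O(n):

assert bubble_sort([9, 3, 7]) == [3, 7, 9]
assert bubble_sort([1, 2, 3]) == [1, 2, 3]  # one pass, no recursive call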
'''simple docstring'''
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {"vocab_file": "spiece.model"}
_SCREAMING_SNAKE_CASE = {
"vocab_file": {
"TsinghuaAI/CPM-Generate": "https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model",
}
}
class lowerCAmelCase_ ( __magic_name__ ):
def __init__( self , _lowerCAmelCase , _lowerCAmelCase=False , _lowerCAmelCase=True , _lowerCAmelCase=False , _lowerCAmelCase="<s>" , _lowerCAmelCase="</s>" , _lowerCAmelCase="<unk>" , _lowerCAmelCase="<sep>" , _lowerCAmelCase="<pad>" , _lowerCAmelCase="<cls>" , _lowerCAmelCase="<mask>" , _lowerCAmelCase=["<eop>", "<eod>"] , _lowerCAmelCase = None , **_lowerCAmelCase , ) -> None:
_lowerCAmelCase = AddedToken(_lowerCAmelCase , lstrip=_lowerCAmelCase , rstrip=_lowerCAmelCase ) if isinstance(_lowerCAmelCase , _lowerCAmelCase ) else mask_token
_lowerCAmelCase = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=_lowerCAmelCase , remove_space=_lowerCAmelCase , keep_accents=_lowerCAmelCase , bos_token=_lowerCAmelCase , eos_token=_lowerCAmelCase , unk_token=_lowerCAmelCase , sep_token=_lowerCAmelCase , pad_token=_lowerCAmelCase , cls_token=_lowerCAmelCase , mask_token=_lowerCAmelCase , additional_special_tokens=_lowerCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **_lowerCAmelCase , )
_lowerCAmelCase = 3
_lowerCAmelCase = do_lower_case
_lowerCAmelCase = remove_space
_lowerCAmelCase = keep_accents
_lowerCAmelCase = vocab_file
_lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(_lowerCAmelCase )
try:
import jieba
except ModuleNotFoundError as error:
raise error.__class__(
"You need to install jieba to use CpmTokenizer or CpmTokenizerFast. "
"See https://pypi.org/project/jieba/ for installation." )
_lowerCAmelCase = jieba
_lowerCAmelCase = str.maketrans(" \n" , "\u2582\u2583" )
@property
# Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size
def _snake_case ( self ) -> Optional[int]:
return len(self.sp_model )
def _snake_case ( self ) -> Optional[int]:
_lowerCAmelCase = {self.convert_ids_to_tokens(_lowerCAmelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ) -> Tuple:
_lowerCAmelCase = self.__dict__.copy()
_lowerCAmelCase = None
return state
def __setstate__( self , _lowerCAmelCase ) -> Dict:
_lowerCAmelCase = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
_lowerCAmelCase = {}
_lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def _snake_case ( self , _lowerCAmelCase ) -> str:
if self.remove_space:
_lowerCAmelCase = " ".join(inputs.strip().split() )
else:
_lowerCAmelCase = inputs
_lowerCAmelCase = outputs.replace("``" , "\"" ).replace("''" , "\"" )
if not self.keep_accents:
_lowerCAmelCase = unicodedata.normalize("NFKD" , _lowerCAmelCase )
_lowerCAmelCase = "".join([c for c in outputs if not unicodedata.combining(_lowerCAmelCase )] )
if self.do_lower_case:
_lowerCAmelCase = outputs.lower()
return outputs
def _snake_case ( self , _lowerCAmelCase ) -> List[str]:
_lowerCAmelCase = self.preprocess_text(_lowerCAmelCase )
_lowerCAmelCase = self.sp_model.encode(_lowerCAmelCase , out_type=_lowerCAmelCase )
_lowerCAmelCase = []
for piece in pieces:
if len(_lowerCAmelCase ) > 1 and piece[-1] == str("," ) and piece[-2].isdigit():
_lowerCAmelCase = self.sp_model.EncodeAsPieces(piece[:-1].replace(_lowerCAmelCase , "" ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
_lowerCAmelCase = cur_pieces[1:]
else:
_lowerCAmelCase = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(_lowerCAmelCase )
else:
new_pieces.append(_lowerCAmelCase )
return new_pieces
def _snake_case ( self , _lowerCAmelCase ) -> str:
return self.sp_model.PieceToId(_lowerCAmelCase )
def _snake_case ( self , _lowerCAmelCase ) -> Optional[Any]:
return self.sp_model.IdToPiece(_lowerCAmelCase )
def _snake_case ( self , _lowerCAmelCase ) -> Optional[Any]:
_lowerCAmelCase = "".join(_lowerCAmelCase ).replace(_lowerCAmelCase , " " ).strip()
return out_string
def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase = None ) -> List[int]:
_lowerCAmelCase = [self.sep_token_id]
_lowerCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase = None , _lowerCAmelCase = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_lowerCAmelCase , token_ids_a=_lowerCAmelCase , already_has_special_tokens=_lowerCAmelCase )
if token_ids_a is not None:
return ([0] * len(_lowerCAmelCase )) + [1] + ([0] * len(_lowerCAmelCase )) + [1, 1]
return ([0] * len(_lowerCAmelCase )) + [1, 1]
def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase = None ) -> List[int]:
_lowerCAmelCase = [self.sep_token_id]
_lowerCAmelCase = [2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase = None ) -> Tuple[str]:
if not os.path.isdir(_lowerCAmelCase ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
_lowerCAmelCase = os.path.join(
_lowerCAmelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_lowerCAmelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _lowerCAmelCase )
elif not os.path.isfile(self.vocab_file ):
with open(_lowerCAmelCase , "wb" ) as fi:
_lowerCAmelCase = self.sp_model.serialized_model_proto()
fi.write(_lowerCAmelCase )
return (out_vocab_file,)
def _snake_case ( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Union[str, Any]:
_lowerCAmelCase = super()._decode(*_lowerCAmelCase , **_lowerCAmelCase )
_lowerCAmelCase = text.replace(" " , "" ).replace("\u2582" , " " ).replace("\u2583" , "\n" )
return text
| 18 | 0 |
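The jieba pre-tokenization trick above in miniature: spaces and newlines are mapped to the placeholder glyphs '\u2582' and '\u2583' so the sentencepiece model treats them as ordinary symbols, and `_decode` reverses the mapping. A sketch, assuming jieba's default behavior of yielding whitespace runs as their own segments:

import jieba

translator = str.maketrans(" \n", "\u2582\u2583")
text = "你好 世界\n再见"
segments = [seg.translate(translator) for seg in jieba.cut(text, cut_all=False)]
pre = " ".join(segments)  # what the sentencepiece model actually sees
post = pre.replace(" ", "").replace("\u2582", " ").replace("\u2583", "\n")
assert post == text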
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import BaseOutput, is_torch_available, is_transformers_available
@dataclass
class SemanticStableDiffusionPipelineOutput(BaseOutput):
    images: Union[List[PIL.Image.Image], np.ndarray]
    nsfw_content_detected: Optional[List[bool]]
if is_transformers_available() and is_torch_available():
from .pipeline_semantic_stable_diffusion import SemanticStableDiffusionPipeline
| 412 |
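`BaseOutput` subclasses act as both dict and tuple; a small sketch, assuming diffusers' usual indexing semantics (fields that are `None` are dropped from the keys):

from PIL import Image

out = SemanticStableDiffusionPipelineOutput(
    images=[Image.new("RGB", (8, 8))], nsfw_content_detected=None
)
assert out.images is out["images"] is out[0]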
'''simple docstring'''
from sklearn.metrics import mean_squared_error
import datasets
_SCREAMING_SNAKE_CASE = "\\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n"
_SCREAMING_SNAKE_CASE = "\\nMean Squared Error(MSE) is the average of the square of difference between the predicted\nand actual values.\n"
_SCREAMING_SNAKE_CASE = "\nArgs:\n predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)\n Estimated target values.\n references: array-like of shape (n_samples,) or (n_samples, n_outputs)\n Ground truth (correct) target values.\n sample_weight: array-like of shape (n_samples,), default=None\n Sample weights.\n multioutput: {\"raw_values\", \"uniform_average\"} or array-like of shape (n_outputs,), default=\"uniform_average\"\n Defines aggregating of multiple output values. Array-like value defines weights used to average errors.\n\n \"raw_values\" : Returns a full set of errors in case of multioutput input.\n\n \"uniform_average\" : Errors of all outputs are averaged with uniform weight.\n\n squared : bool, default=True\n If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.\n\nReturns:\n mse : mean squared error.\nExamples:\n\n >>> mse_metric = datasets.load_metric(\"mse\")\n >>> predictions = [2.5, 0.0, 2, 8]\n >>> references = [3, -0.5, 2, 7]\n >>> results = mse_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'mse': 0.375}\n >>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)\n >>> print(rmse_result)\n {'mse': 0.6123724356957945}\n\n If you're using multi-dimensional lists, then set the config as follows :\n\n >>> mse_metric = datasets.load_metric(\"mse\", \"multilist\")\n >>> predictions = [[0.5, 1], [-1, 1], [7, -6]]\n >>> references = [[0, 2], [-1, 2], [8, -5]]\n >>> results = mse_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'mse': 0.7083333333333334}\n >>> results = mse_metric.compute(predictions=predictions, references=references, multioutput='raw_values')\n >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n {'mse': array([0.41666667, 1. ])}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class lowerCAmelCase_ ( datasets.Metric ):
def _snake_case ( self ) -> Dict:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , reference_urls=[
"https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html"
] , )
def _snake_case ( self ) -> Tuple:
if self.config_name == "multilist":
return {
"predictions": datasets.Sequence(datasets.Value("float" ) ),
"references": datasets.Sequence(datasets.Value("float" ) ),
}
else:
return {
"predictions": datasets.Value("float" ),
"references": datasets.Value("float" ),
}
def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=None , _lowerCAmelCase="uniform_average" , _lowerCAmelCase=True ) -> Union[str, Any]:
_lowerCAmelCase = mean_squared_error(
_lowerCAmelCase , _lowerCAmelCase , sample_weight=_lowerCAmelCase , multioutput=_lowerCAmelCase , squared=_lowerCAmelCase )
return {"mse": mse}
| 18 | 0 |
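The metric is a thin wrapper over scikit-learn, so the numbers in its docstring can be reproduced directly:

from sklearn.metrics import mean_squared_error

preds, refs = [2.5, 0.0, 2, 8], [3, -0.5, 2, 7]
assert mean_squared_error(refs, preds) == 0.375
rmse = mean_squared_error(refs, preds, squared=False)  # ~0.6123724356957945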
"""simple docstring"""
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures
logger = logging.get_logger(__name__)
@dataclass
class GlueDataTrainingArguments:
    task_name: str = field(metadata={"help": "The name of the task to train on: " + ", ".join(glue_processors.keys())})
    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."}
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )

    def __post_init__(self):
        self.task_name = self.task_name.lower()
class Split(Enum):
    train = "train"
    dev = "dev"
    test = "test"


class GlueDataset(Dataset):
    args: GlueDataTrainingArguments
    output_mode: str
    features: List[InputFeatures]

    def __init__(
        self,
        args: GlueDataTrainingArguments,
        tokenizer: PreTrainedTokenizerBase,
        limit_length: Optional[int] = None,
        mode: Union[str, Split] = Split.train,
        cache_dir: Optional[str] = None,
    ):
        warnings.warn(
            """This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets """
            """library. You can have a look at this example script for pointers: """
            """https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py""",
            FutureWarning,
        )
        self.args = args
        self.processor = glue_processors[args.task_name]()
        self.output_mode = glue_output_modes[args.task_name]
        if isinstance(mode, str):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError("""mode is not a valid split name""")
        # Load data features from cache or dataset file
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir,
            F'''cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}''',
        )
        label_list = self.processor.get_labels()
        if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
            "RobertaTokenizer",
            "RobertaTokenizerFast",
            "XLMRobertaTokenizer",
            "BartTokenizer",
            "BartTokenizerFast",
        ):
            # HACK(label indices are swapped in RoBERTa pretrained model)
            label_list[1], label_list[2] = label_list[2], label_list[1]
        self.label_list = label_list

        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + """.lock"""
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                start = time.time()
                self.features = torch.load(cached_features_file)
                logger.info(
                    F'''Loading features from cached file {cached_features_file} [took %.3f s]''', time.time() - start
                )
            else:
                logger.info(F'''Creating features from dataset file at {args.data_dir}''')
                if mode == Split.dev:
                    examples = self.processor.get_dev_examples(args.data_dir)
                elif mode == Split.test:
                    examples = self.processor.get_test_examples(args.data_dir)
                else:
                    examples = self.processor.get_train_examples(args.data_dir)
                if limit_length is not None:
                    examples = examples[:limit_length]
                self.features = glue_convert_examples_to_features(
                    examples,
                    tokenizer,
                    max_length=args.max_seq_length,
                    label_list=label_list,
                    output_mode=self.output_mode,
                )
                start = time.time()
                torch.save(self.features, cached_features_file)
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    F'''Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]'''
                )
    def __len__(self):
        return len(self.features)

    def __getitem__(self, i) -> InputFeatures:
        return self.features[i]

    def get_labels(self):
        return self.label_list
| 341 |
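The caching pattern in that `__init__` in miniature: a `FileLock` ensures only one distributed worker materializes the cache while the others block on the lock and then load the saved file. A standalone sketch:

import os
import torch
from filelock import FileLock

def cached_compute(path, compute_fn):
    with FileLock(path + ".lock"):
        if os.path.exists(path):
            return torch.load(path)
        result = compute_fn()
        torch.save(result, path)
        return result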
'''simple docstring'''
def different_signs(num1: int, num2: int) -> bool:
    """Return True if `num1` and `num2` have opposite signs.

    >>> different_signs(1, -1)
    True
    >>> different_signs(1, 1)
    False
    >>> different_signs(0, 5)
    False
    """
    return num1 ^ num2 < 0
if __name__ == "__main__":
import doctest
doctest.testmod()
| 18 | 0 |
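Why the XOR test works: Python integers behave as two's complement under bitwise operators, so `num1 ^ num2` comes out negative exactly when the two sign bits differ:

assert different_signs(1, -1) is True
assert different_signs(-10, -3) is False
assert (1 ^ -1) == -2  # negative result -> opposite signs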
import os
import pickle
import unittest
from transformers import AutoTokenizer
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.models.bert_japanese.tokenization_bert_japanese import (
VOCAB_FILES_NAMES,
BertJapaneseTokenizer,
CharacterTokenizer,
JumanppTokenizer,
MecabTokenizer,
SudachiTokenizer,
WordpieceTokenizer,
)
from transformers.testing_utils import custom_tokenizers, require_jumanpp, require_sudachi
from ...test_tokenization_common import TokenizerTesterMixin
@custom_tokenizers
class __SCREAMING_SNAKE_CASE ( UpperCamelCase , unittest.TestCase):
"""simple docstring"""
__UpperCAmelCase = BertJapaneseTokenizer
__UpperCAmelCase = False
__UpperCAmelCase = True
def lowercase_ ( self ):
super().setUp()
__snake_case : Any = [
'[UNK]',
'[CLS]',
'[SEP]',
'こんにちは',
'こん',
'にちは',
'ばんは',
'##こん',
'##にちは',
'##ばんは',
'世界',
'##世界',
'、',
'##、',
'。',
'##。',
]
__snake_case : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
def lowercase_ ( self , _UpperCAmelCase ):
__snake_case : List[Any] = 'こんにちは、世界。 \nこんばんは、世界。'
__snake_case : Optional[int] = 'こんにちは 、 世界 。 こんばんは 、 世界 。'
return input_text, output_text
def lowercase_ ( self , _UpperCAmelCase ):
__snake_case , __snake_case : Tuple = self.get_input_output_texts(_lowerCAmelCase )
__snake_case : Dict = tokenizer.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase )
__snake_case : Union[str, Any] = tokenizer.decode(_lowerCAmelCase , clean_up_tokenization_spaces=_lowerCAmelCase )
return text, ids
def lowercase_ ( self ):
pass # TODO add if relevant
def lowercase_ ( self ):
pass # TODO add if relevant
def lowercase_ ( self ):
pass # TODO add if relevant
def lowercase_ ( self ):
__snake_case : Optional[Any] = self.tokenizer_class(self.vocab_file )
__snake_case : Tuple = tokenizer.tokenize('こんにちは、世界。\nこんばんは、世界。' )
self.assertListEqual(_lowerCAmelCase , ['こんにちは', '、', '世界', '。', 'こん', '##ばんは', '、', '世界', '。'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowerCAmelCase ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
def lowercase_ ( self ):
__snake_case : Dict = self.tokenizer_class(self.vocab_file , word_tokenizer_type='mecab' )
self.assertIsNotNone(_lowerCAmelCase )
__snake_case : Optional[int] = 'こんにちは、世界。\nこんばんは、世界。'
__snake_case : int = tokenizer.tokenize(_lowerCAmelCase )
self.assertListEqual(_lowerCAmelCase , ['こんにちは', '、', '世界', '。', 'こん', '##ばんは', '、', '世界', '。'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowerCAmelCase ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
__snake_case : List[str] = os.path.join(self.tmpdirname , 'tokenizer.bin' )
with open(_lowerCAmelCase , 'wb' ) as handle:
pickle.dump(_lowerCAmelCase , _lowerCAmelCase )
with open(_lowerCAmelCase , 'rb' ) as handle:
__snake_case : str = pickle.load(_lowerCAmelCase )
__snake_case : Tuple = tokenizer_new.tokenize(_lowerCAmelCase )
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
def lowercase_ ( self ):
__snake_case : int = MecabTokenizer(mecab_dic='ipadic' )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップルストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れ', 'た', '。'] , )
def lowercase_ ( self ):
try:
__snake_case : Optional[Any] = MecabTokenizer(mecab_dic='unidic_lite' )
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップル', 'ストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れ', 'た', '。'] , )
def lowercase_ ( self ):
try:
__snake_case : Any = MecabTokenizer(mecab_dic='unidic' )
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップル', 'ストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れ', 'た', '。'] , )
def lowercase_ ( self ):
__snake_case : Optional[int] = MecabTokenizer(do_lower_case=_lowerCAmelCase , mecab_dic='ipadic' )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップルストア', 'で', 'iphone', '8', 'が', '発売', 'さ', 'れ', 'た', '。'] , )
def lowercase_ ( self ):
try:
__snake_case : Optional[int] = MecabTokenizer(
do_lower_case=_lowerCAmelCase , normalize_text=_lowerCAmelCase , mecab_option='-d /usr/local/lib/mecab/dic/jumandic' )
except RuntimeError:
# if dict doesn't exist in the system, previous code raises this error.
return
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップルストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れた', '\u3000', '。'] , )
def lowercase_ ( self ):
__snake_case : List[str] = MecabTokenizer(normalize_text=_lowerCAmelCase , mecab_dic='ipadic' )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップルストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れ', 'た', ' ', '。'] , )
@require_sudachi
def lowercase_ ( self ):
__snake_case : Tuple = self.tokenizer_class(self.vocab_file , word_tokenizer_type='sudachi' )
self.assertIsNotNone(_lowerCAmelCase )
__snake_case : Dict = 'こんにちは、世界。\nこんばんは、世界。'
__snake_case : Any = tokenizer.tokenize(_lowerCAmelCase )
self.assertListEqual(_lowerCAmelCase , ['こんにちは', '、', '世界', '。', 'こん', '##ばんは', '、', '世界', '。'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowerCAmelCase ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
__snake_case : Union[str, Any] = os.path.join(self.tmpdirname , 'tokenizer.bin' )
with open(_lowerCAmelCase , 'wb' ) as handle:
pickle.dump(_lowerCAmelCase , _lowerCAmelCase )
with open(_lowerCAmelCase , 'rb' ) as handle:
__snake_case : int = pickle.load(_lowerCAmelCase )
__snake_case : List[Any] = tokenizer_new.tokenize(_lowerCAmelCase )
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
@require_sudachi
def lowercase_ ( self ):
__snake_case : Dict = SudachiTokenizer(sudachi_dict_type='core' )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , [' ', '\t', 'アップル', 'ストア', 'で', 'iPhone', '8', ' ', 'が', ' ', ' ', '\n ', '発売', 'さ', 'れ', 'た', ' ', '。', ' ', ' '] , )
@require_sudachi
def lowercase_ ( self ):
__snake_case : List[str] = SudachiTokenizer(sudachi_dict_type='core' , sudachi_split_mode='A' )
self.assertListEqual(tokenizer.tokenize('外国人参政権' ) , ['外国', '人', '参政', '権'] )
@require_sudachi
def lowercase_ ( self ):
__snake_case : Union[str, Any] = SudachiTokenizer(sudachi_dict_type='core' , sudachi_split_mode='B' )
self.assertListEqual(tokenizer.tokenize('外国人参政権' ) , ['外国人', '参政権'] )
@require_sudachi
def lowercase_ ( self ):
__snake_case : Any = SudachiTokenizer(sudachi_dict_type='core' , sudachi_split_mode='C' )
self.assertListEqual(tokenizer.tokenize('外国人参政権' ) , ['外国人参政権'] )
@require_sudachi
def lowercase_ ( self ):
__snake_case : Dict = SudachiTokenizer(do_lower_case=_lowerCAmelCase , sudachi_dict_type='core' )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , [' ', '\t', 'アップル', 'ストア', 'で', 'iphone', '8', ' ', 'が', ' ', ' ', '\n ', '発売', 'さ', 'れ', 'た', ' ', '。', ' ', ' '] , )
@require_sudachi
def lowercase_ ( self ):
__snake_case : str = SudachiTokenizer(normalize_text=_lowerCAmelCase , sudachi_dict_type='core' )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , [' ', '\t', 'アップル', 'ストア', 'で', 'iPhone', '8', ' ', 'が', ' ', ' ', '\n ', '発売', 'さ', 'れ', 'た', '\u3000', '。', ' ', ' '] , )
@require_sudachi
def lowercase_ ( self ):
__snake_case : str = SudachiTokenizer(trim_whitespace=_lowerCAmelCase , sudachi_dict_type='core' )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップル', 'ストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れ', 'た', '。'] , )
@require_jumanpp
def lowercase_ ( self ):
__snake_case : List[Any] = self.tokenizer_class(self.vocab_file , word_tokenizer_type='jumanpp' )
self.assertIsNotNone(_lowerCAmelCase )
__snake_case : Union[str, Any] = 'こんにちは、世界。\nこんばんは、世界。'
__snake_case : str = tokenizer.tokenize(_lowerCAmelCase )
self.assertListEqual(_lowerCAmelCase , ['こんにちは', '、', '世界', '。', 'こん', '##ばんは', '、', '世界', '。'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowerCAmelCase ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
__snake_case : Optional[int] = os.path.join(self.tmpdirname , 'tokenizer.bin' )
with open(_lowerCAmelCase , 'wb' ) as handle:
pickle.dump(_lowerCAmelCase , _lowerCAmelCase )
with open(_lowerCAmelCase , 'rb' ) as handle:
__snake_case : str = pickle.load(_lowerCAmelCase )
__snake_case : Optional[Any] = tokenizer_new.tokenize(_lowerCAmelCase )
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
@require_jumanpp
def lowercase_ ( self ):
__snake_case : Union[str, Any] = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップル', 'ストア', 'で', 'iPhone', '8', '\u3000', 'が', '\u3000', '\u3000', '\u3000', '発売', 'さ', 'れた', '\u3000', '。'] , )
@require_jumanpp
def lowercase_ ( self ):
__snake_case : str = JumanppTokenizer(do_lower_case=_lowerCAmelCase )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップル', 'ストア', 'で', 'iphone', '8', '\u3000', 'が', '\u3000', '\u3000', '\u3000', '発売', 'さ', 'れた', '\u3000', '。'] , )
@require_jumanpp
def lowercase_ ( self ):
__snake_case : Dict = JumanppTokenizer(normalize_text=_lowerCAmelCase )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['ア', 'ッ', 'フ', '゚', 'ル', 'ストア', 'で', 'iPhone', '8', '\u3000', 'が', '\u3000', '\u3000', '\u3000', '発売', 'さ', 'れた', '\u3000', '。'] , )
@require_jumanpp
def lowercase_ ( self ):
__snake_case : List[str] = JumanppTokenizer(trim_whitespace=_lowerCAmelCase )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップル', 'ストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れた', '。'] , )
@require_jumanpp
def lowercase_ ( self ):
__snake_case : Optional[Any] = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize('ありがとうございますm(_ _)m見つけるのが大変です。' ) , ['ありがとう', 'ございます', 'm(_ _)m', '見つける', 'の', 'が', '大変です', '。'] , )
def lowercase_ ( self ):
__snake_case : int = ['[UNK]', '[CLS]', '[SEP]', 'こんにちは', 'こん', 'にちは', 'ばんは', '##こん', '##にちは', '##ばんは']
__snake_case : List[Any] = {}
for i, token in enumerate(_lowerCAmelCase ):
__snake_case : Dict = i
__snake_case : int = WordpieceTokenizer(vocab=_lowerCAmelCase , unk_token='[UNK]' )
self.assertListEqual(tokenizer.tokenize('' ) , [] )
self.assertListEqual(tokenizer.tokenize('こんにちは' ) , ['こんにちは'] )
self.assertListEqual(tokenizer.tokenize('こんばんは' ) , ['こん', '##ばんは'] )
self.assertListEqual(tokenizer.tokenize('こんばんは こんばんにちは こんにちは' ) , ['こん', '##ばんは', '[UNK]', 'こんにちは'] )
def lowercase_ ( self ):
__snake_case : Optional[int] = BertJapaneseTokenizer.from_pretrained('nlp-waseda/roberta-base-japanese-with-auto-jumanpp' )
__snake_case : List[str] = tokenizer.subword_tokenizer
__snake_case : str = subword_tokenizer.tokenize('国境 の 長い トンネル を 抜ける と 雪国 であった 。' )
self.assertListEqual(_lowerCAmelCase , ['▁国境', '▁の', '▁長い', '▁トンネル', '▁を', '▁抜ける', '▁と', '▁雪', '国', '▁であった', '▁。'] )
__snake_case : str = subword_tokenizer.tokenize('こんばんは こんばん にち は こんにちは' )
self.assertListEqual(_lowerCAmelCase , ['▁こん', 'ばん', 'は', '▁こん', 'ばん', '▁に', 'ち', '▁は', '▁こんにちは'] )
def lowercase_ ( self ):
__snake_case : List[Any] = self.tokenizer_class.from_pretrained('cl-tohoku/bert-base-japanese' )
__snake_case : List[str] = tokenizer.encode('ありがとう。' , add_special_tokens=_lowerCAmelCase )
__snake_case : int = tokenizer.encode('どういたしまして。' , add_special_tokens=_lowerCAmelCase )
__snake_case : Union[str, Any] = tokenizer.build_inputs_with_special_tokens(_lowerCAmelCase )
__snake_case : Optional[Any] = tokenizer.build_inputs_with_special_tokens(_lowerCAmelCase , _lowerCAmelCase )
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class __SCREAMING_SNAKE_CASE ( UpperCamelCase , unittest.TestCase):
"""simple docstring"""
__UpperCAmelCase = BertJapaneseTokenizer
__UpperCAmelCase = False
def lowercase_ ( self ):
super().setUp()
__snake_case : int = ['[UNK]', '[CLS]', '[SEP]', 'こ', 'ん', 'に', 'ち', 'は', 'ば', '世', '界', '、', '。']
__snake_case : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
def lowercase_ ( self , **_UpperCAmelCase ):
return BertJapaneseTokenizer.from_pretrained(self.tmpdirname , subword_tokenizer_type='character' , **_lowerCAmelCase )
def lowercase_ ( self , _UpperCAmelCase ):
__snake_case : Optional[Any] = 'こんにちは、世界。 \nこんばんは、世界。'
__snake_case : int = 'こ ん に ち は 、 世 界 。 こ ん ば ん は 、 世 界 。'
return input_text, output_text
def lowercase_ ( self ):
pass # TODO add if relevant
def lowercase_ ( self ):
pass # TODO add if relevant
def lowercase_ ( self ):
pass # TODO add if relevant
def lowercase_ ( self ):
__snake_case : Union[str, Any] = self.tokenizer_class(self.vocab_file , subword_tokenizer_type='character' )
__snake_case : int = tokenizer.tokenize('こんにちは、世界。 \nこんばんは、世界。' )
self.assertListEqual(
_lowerCAmelCase , ['こ', 'ん', 'に', 'ち', 'は', '、', '世', '界', '。', 'こ', 'ん', 'ば', 'ん', 'は', '、', '世', '界', '。'] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_lowerCAmelCase ) , [3, 4, 5, 6, 7, 11, 9, 10, 12, 3, 4, 8, 4, 7, 11, 9, 10, 12] )
def lowercase_ ( self ):
__snake_case : Optional[int] = ['[UNK]', '[CLS]', '[SEP]', 'こ', 'ん', 'に', 'ち', 'は', 'ば', '世', '界', '、', '。']
__snake_case : Optional[int] = {}
for i, token in enumerate(_lowerCAmelCase ):
__snake_case : Union[str, Any] = i
__snake_case : Optional[int] = CharacterTokenizer(vocab=_lowerCAmelCase , unk_token='[UNK]' )
self.assertListEqual(tokenizer.tokenize('' ) , [] )
self.assertListEqual(tokenizer.tokenize('こんにちは' ) , ['こ', 'ん', 'に', 'ち', 'は'] )
self.assertListEqual(tokenizer.tokenize('こんにちほ' ) , ['こ', 'ん', 'に', 'ち', '[UNK]'] )
def lowercase_ ( self ):
__snake_case : Union[str, Any] = self.tokenizer_class.from_pretrained('cl-tohoku/bert-base-japanese-char' )
__snake_case : Dict = tokenizer.encode('ありがとう。' , add_special_tokens=_lowerCAmelCase )
__snake_case : List[Any] = tokenizer.encode('どういたしまして。' , add_special_tokens=_lowerCAmelCase )
__snake_case : Dict = tokenizer.build_inputs_with_special_tokens(_lowerCAmelCase )
__snake_case : Union[str, Any] = tokenizer.build_inputs_with_special_tokens(_lowerCAmelCase , _lowerCAmelCase )
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class __SCREAMING_SNAKE_CASE ( unittest.TestCase):
"""simple docstring"""
def lowercase_ ( self ):
__snake_case : int = 'cl-tohoku/bert-base-japanese'
__snake_case : Optional[Any] = AutoTokenizer.from_pretrained(_lowerCAmelCase )
self.assertIsInstance(_lowerCAmelCase , _lowerCAmelCase )
class __SCREAMING_SNAKE_CASE ( unittest.TestCase):
"""simple docstring"""
def lowercase_ ( self ):
__snake_case : str = 'cl-tohoku/bert-base-japanese'
with self.assertLogs('transformers' , level='WARNING' ) as cm:
BertTokenizer.from_pretrained(_lowerCAmelCase )
self.assertTrue(
cm.records[0].message.startswith(
'The tokenizer class you load from this checkpoint is not the same type as the class this function'
' is called from.' ) )
__snake_case : Optional[int] = 'bert-base-cased'
with self.assertLogs('transformers' , level='WARNING' ) as cm:
BertJapaneseTokenizer.from_pretrained(_lowerCAmelCase )
self.assertTrue(
cm.records[0].message.startswith(
'The tokenizer class you load from this checkpoint is not the same type as the class this function'
' is called from.' ) )
| 576 |
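The WordPiece behavior those tests pin down, as a greedy longest-match sketch (the real tokenizer also caps input length and handles further `[UNK]` cases):

def wordpiece_sketch(word, vocab, unk="[UNK]"):
    tokens, start = [], 0
    while start < len(word):
        end, cur = len(word), None
        while start < end:
            piece = ("##" if start > 0 else "") + word[start:end]
            if piece in vocab:
                cur = piece
                break
            end -= 1
        if cur is None:
            return [unk]  # no prefix matched: whole word becomes [UNK]
        tokens.append(cur)
        start = end
    return tokens

assert wordpiece_sketch("こんばんは", {"こん", "##ばんは"}) == ["こん", "##ばんは"]
assert wordpiece_sketch("こんばんほ", {"こん", "##ばんは"}) == ["[UNK]"]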
'''simple docstring'''
from __future__ import annotations
def p_series(nth_term: int | float | str, power: int | float | str) -> list[str]:
    """Return the first `nth_term` terms of the p-series 1 + 1/2^p + 1/3^p + ...

    >>> p_series(5, 2)
    ['1', '1 / 4', '1 / 9', '1 / 16', '1 / 25']
    >>> p_series("", 1000)
    ['']
    """
    if nth_term == "":
        return [""]
    nth_term = int(nth_term)
    power = int(power)
    series: list[str] = []
    for temp in range(int(nth_term)):
        series.append(F'''1 / {pow(temp + 1 , int(power) )}''' if series else "1")
    return series
if __name__ == "__main__":
import doctest
doctest.testmod()
_SCREAMING_SNAKE_CASE = int(input("Enter the last number (nth term) of the P-Series"))
_SCREAMING_SNAKE_CASE = int(input("Enter the power for P-Series"))
print("Formula of P-Series => 1+1/2^p+1/3^p ..... 1/n^p")
print(p_series(nth_term, power))
| 18 | 0 |
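Numerically, the generated strings are the summands of the p-series; for p > 1 it converges (p = 2 gives pi**2 / 6 ≈ 1.6449):

terms = p_series(4, 2)  # ['1', '1 / 4', '1 / 9', '1 / 16']
partial = sum(1 / k**2 for k in range(1, 5))  # ≈ 1.4236, approaching pi**2 / 6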
'''simple docstring'''
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotSmallConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
UpperCAmelCase_ : Tuple = 'platform'
import jax
import jax.numpy as jnp
from transformers.models.blenderbot_small.modeling_flax_blenderbot_small import (
FlaxBlenderbotSmallForConditionalGeneration,
FlaxBlenderbotSmallModel,
shift_tokens_right,
)
def prepare_blenderbot_inputs_dict(
    config,
    input_ids,
    decoder_input_ids=None,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    """simple docstring"""
    if attention_mask is None:
        attention_mask = np.where(input_ids != config.pad_token_id, 1, 0)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.where(decoder_input_ids != config.pad_token_id, 1, 0)
    if head_mask is None:
        head_mask = np.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
    }
class lowercase__ :
'''simple docstring'''
def __init__( self , __snake_case , __snake_case=13 , __snake_case=7 , __snake_case=True , __snake_case=False , __snake_case=99 , __snake_case=16 , __snake_case=2 , __snake_case=4 , __snake_case=4 , __snake_case="gelu" , __snake_case=0.1 , __snake_case=0.1 , __snake_case=32 , __snake_case=2 , __snake_case=1 , __snake_case=0 , __snake_case=0.02 , ):
_SCREAMING_SNAKE_CASE : str = parent
_SCREAMING_SNAKE_CASE : Union[str, Any] = batch_size
_SCREAMING_SNAKE_CASE : Tuple = seq_length
_SCREAMING_SNAKE_CASE : Optional[Any] = is_training
_SCREAMING_SNAKE_CASE : Tuple = use_labels
_SCREAMING_SNAKE_CASE : Tuple = vocab_size
_SCREAMING_SNAKE_CASE : List[Any] = hidden_size
_SCREAMING_SNAKE_CASE : Optional[Any] = num_hidden_layers
_SCREAMING_SNAKE_CASE : Union[str, Any] = num_attention_heads
_SCREAMING_SNAKE_CASE : List[str] = intermediate_size
_SCREAMING_SNAKE_CASE : List[str] = hidden_act
_SCREAMING_SNAKE_CASE : Optional[Any] = hidden_dropout_prob
_SCREAMING_SNAKE_CASE : Union[str, Any] = attention_probs_dropout_prob
_SCREAMING_SNAKE_CASE : Union[str, Any] = max_position_embeddings
_SCREAMING_SNAKE_CASE : int = eos_token_id
_SCREAMING_SNAKE_CASE : List[str] = pad_token_id
_SCREAMING_SNAKE_CASE : str = bos_token_id
_SCREAMING_SNAKE_CASE : int = initializer_range
def UpperCAmelCase_ ( self ):
_SCREAMING_SNAKE_CASE : Union[str, Any] = np.clip(ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) , 3 , self.vocab_size )
_SCREAMING_SNAKE_CASE : List[str] = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) , dtype=np.intaa )) , -1 )
_SCREAMING_SNAKE_CASE : int = shift_tokens_right(_lowerCAmelCase , 1 , 2 )
_SCREAMING_SNAKE_CASE : List[Any] = BlenderbotSmallConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , initializer_range=self.initializer_range , use_cache=_lowerCAmelCase , )
_SCREAMING_SNAKE_CASE : Optional[Any] = prepare_blenderbot_inputs_dict(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
return config, inputs_dict
def UpperCAmelCase_ ( self ):
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : str = self.prepare_config_and_inputs()
return config, inputs_dict
def UpperCAmelCase_ ( self , __snake_case , __snake_case , __snake_case ):
_SCREAMING_SNAKE_CASE : int = 20
_SCREAMING_SNAKE_CASE : List[Any] = model_class_name(_lowerCAmelCase )
_SCREAMING_SNAKE_CASE : Union[str, Any] = model.encode(inputs_dict["""input_ids"""] )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Any = (
inputs_dict["""decoder_input_ids"""],
inputs_dict["""decoder_attention_mask"""],
)
_SCREAMING_SNAKE_CASE : Tuple = model.init_cache(decoder_input_ids.shape[0] , _lowerCAmelCase , _lowerCAmelCase )
_SCREAMING_SNAKE_CASE : Optional[Any] = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype="""i4""" )
_SCREAMING_SNAKE_CASE : Tuple = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
_SCREAMING_SNAKE_CASE : Any = model.decode(
decoder_input_ids[:, :-1] , _lowerCAmelCase , decoder_attention_mask=_lowerCAmelCase , past_key_values=_lowerCAmelCase , decoder_position_ids=_lowerCAmelCase , )
_SCREAMING_SNAKE_CASE : Union[str, Any] = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""" )
_SCREAMING_SNAKE_CASE : Tuple = model.decode(
decoder_input_ids[:, -1:] , _lowerCAmelCase , decoder_attention_mask=_lowerCAmelCase , past_key_values=outputs_cache.past_key_values , decoder_position_ids=_lowerCAmelCase , )
_SCREAMING_SNAKE_CASE : List[Any] = model.decode(_lowerCAmelCase , _lowerCAmelCase )
_SCREAMING_SNAKE_CASE : Optional[Any] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=f"""Max diff is {diff}""" )
def UpperCAmelCase_ ( self , __snake_case , __snake_case , __snake_case ):
_SCREAMING_SNAKE_CASE : List[Any] = 20
_SCREAMING_SNAKE_CASE : Optional[Any] = model_class_name(_lowerCAmelCase )
_SCREAMING_SNAKE_CASE : Optional[int] = model.encode(inputs_dict["""input_ids"""] )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Dict = (
inputs_dict["""decoder_input_ids"""],
inputs_dict["""decoder_attention_mask"""],
)
_SCREAMING_SNAKE_CASE : Dict = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
] , axis=-1 , )
_SCREAMING_SNAKE_CASE : Tuple = model.init_cache(decoder_input_ids.shape[0] , _lowerCAmelCase , _lowerCAmelCase )
_SCREAMING_SNAKE_CASE : Optional[Any] = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
_SCREAMING_SNAKE_CASE : str = model.decode(
decoder_input_ids[:, :-1] , _lowerCAmelCase , decoder_attention_mask=_lowerCAmelCase , past_key_values=_lowerCAmelCase , decoder_position_ids=_lowerCAmelCase , )
_SCREAMING_SNAKE_CASE : List[str] = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""" )
_SCREAMING_SNAKE_CASE : List[str] = model.decode(
decoder_input_ids[:, -1:] , _lowerCAmelCase , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=_lowerCAmelCase , decoder_position_ids=_lowerCAmelCase , )
_SCREAMING_SNAKE_CASE : Any = model.decode(_lowerCAmelCase , _lowerCAmelCase , decoder_attention_mask=_lowerCAmelCase )
_SCREAMING_SNAKE_CASE : Dict = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=f"""Max diff is {diff}""" )
@require_flax
class lowercase__ ( unittest.TestCase ):
'''simple docstring'''
A_ : str = 99
def UpperCAmelCase_ ( self ):
_SCREAMING_SNAKE_CASE : Union[str, Any] = np.array(
[
[71, 82, 18, 33, 46, 91, 2],
[68, 34, 26, 58, 30, 82, 2],
[5, 97, 17, 39, 94, 40, 2],
[76, 83, 94, 25, 70, 78, 2],
[87, 59, 41, 35, 48, 66, 2],
[55, 13, 16, 58, 5, 2, 1], # note padding
[64, 27, 31, 51, 12, 75, 2],
[52, 64, 86, 17, 83, 39, 2],
[48, 61, 9, 24, 71, 82, 2],
[26, 1, 60, 48, 22, 13, 2],
[21, 5, 62, 28, 14, 76, 2],
[45, 98, 37, 86, 59, 48, 2],
[70, 70, 50, 9, 28, 0, 2],
] , dtype=np.int64 , )
_SCREAMING_SNAKE_CASE : Optional[int] = input_ids.shape[0]
_SCREAMING_SNAKE_CASE : Dict = BlenderbotSmallConfig(
vocab_size=self.vocab_size , d_model=24 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=32 , decoder_ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
def UpperCAmelCase_ ( self ):
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Dict = self._get_config_and_data()
_SCREAMING_SNAKE_CASE : Optional[int] = FlaxBlenderbotSmallForConditionalGeneration(_lowerCAmelCase )
_SCREAMING_SNAKE_CASE : Optional[int] = lm_model(input_ids=_lowerCAmelCase )
_SCREAMING_SNAKE_CASE : Tuple = (batch_size, input_ids.shape[1], config.vocab_size)
self.assertEqual(outputs["""logits"""].shape , _lowerCAmelCase )
def UpperCAmelCase_ ( self ):
_SCREAMING_SNAKE_CASE : Tuple = BlenderbotSmallConfig(
vocab_size=self.vocab_size , d_model=14 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=8 , decoder_ffn_dim=8 , max_position_embeddings=48 , )
_SCREAMING_SNAKE_CASE : Optional[Any] = FlaxBlenderbotSmallForConditionalGeneration(_lowerCAmelCase )
_SCREAMING_SNAKE_CASE : Optional[Any] = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]] , dtype=np.int64 )
_SCREAMING_SNAKE_CASE : Tuple = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]] , dtype=np.int64 )
_SCREAMING_SNAKE_CASE : Union[str, Any] = lm_model(input_ids=_lowerCAmelCase , decoder_input_ids=_lowerCAmelCase )
_SCREAMING_SNAKE_CASE : Tuple = (*summary.shape, config.vocab_size)
self.assertEqual(outputs["""logits"""].shape , _lowerCAmelCase )
def UpperCAmelCase_ ( self ):
_SCREAMING_SNAKE_CASE : str = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]] , dtype=np.int64 )
_SCREAMING_SNAKE_CASE : str = shift_tokens_right(_lowerCAmelCase , 1 , 2 )
_SCREAMING_SNAKE_CASE : Any = np.equal(_lowerCAmelCase , 1 ).astype(np.float32 ).sum()
_SCREAMING_SNAKE_CASE : int = np.equal(_lowerCAmelCase , 1 ).astype(np.float32 ).sum()
self.assertEqual(shifted.shape , input_ids.shape )
self.assertEqual(_lowerCAmelCase , n_pad_before - 1 )
self.assertTrue(np.equal(shifted[:, 0] , 2 ).all() )
@require_flax
class lowercase__ ( FlaxModelTesterMixin , unittest.TestCase , FlaxGenerationTesterMixin ):
'''simple docstring'''
A_ : List[Any] = True
A_ : List[str] = (
(
FlaxBlenderbotSmallModel,
FlaxBlenderbotSmallForConditionalGeneration,
)
if is_flax_available()
else ()
)
A_ : Optional[Any] = (FlaxBlenderbotSmallForConditionalGeneration,) if is_flax_available() else ()
def UpperCAmelCase_ ( self ):
_SCREAMING_SNAKE_CASE : List[str] = FlaxBlenderbotSmallModelTester(self )
def UpperCAmelCase_ ( self ):
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
def UpperCAmelCase_ ( self ):
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
def UpperCAmelCase_ ( self ):
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
_SCREAMING_SNAKE_CASE : Optional[int] = self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase )
_SCREAMING_SNAKE_CASE : Optional[int] = model_class(_lowerCAmelCase )
@jax.jit
def encode_jitted(__snake_case , __snake_case=None , **__snake_case ):
return model.encode(input_ids=_lowerCAmelCase , attention_mask=_lowerCAmelCase )
with self.subTest("""JIT Enabled""" ):
_SCREAMING_SNAKE_CASE : Dict = encode_jitted(**_lowerCAmelCase ).to_tuple()
with self.subTest("""JIT Disabled""" ):
with jax.disable_jit():
_SCREAMING_SNAKE_CASE : Any = encode_jitted(**_lowerCAmelCase ).to_tuple()
self.assertEqual(len(_lowerCAmelCase ) , len(_lowerCAmelCase ) )
for jitted_output, output in zip(_lowerCAmelCase , _lowerCAmelCase ):
self.assertEqual(jitted_output.shape , output.shape )
def UpperCAmelCase_ ( self ):
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
_SCREAMING_SNAKE_CASE : Dict = model_class(_lowerCAmelCase )
_SCREAMING_SNAKE_CASE : int = model.encode(inputs_dict["""input_ids"""] , inputs_dict["""attention_mask"""] )
_SCREAMING_SNAKE_CASE : int = {
"""decoder_input_ids""": inputs_dict["""decoder_input_ids"""],
"""decoder_attention_mask""": inputs_dict["""decoder_attention_mask"""],
"""encoder_outputs""": encoder_outputs,
}
@jax.jit
def decode_jitted(__snake_case , __snake_case , __snake_case ):
return model.decode(
decoder_input_ids=_lowerCAmelCase , decoder_attention_mask=_lowerCAmelCase , encoder_outputs=_lowerCAmelCase , )
with self.subTest("""JIT Enabled""" ):
_SCREAMING_SNAKE_CASE : Union[str, Any] = decode_jitted(**_lowerCAmelCase ).to_tuple()
with self.subTest("""JIT Disabled""" ):
with jax.disable_jit():
_SCREAMING_SNAKE_CASE : Union[str, Any] = decode_jitted(**_lowerCAmelCase ).to_tuple()
self.assertEqual(len(_lowerCAmelCase ) , len(_lowerCAmelCase ) )
for jitted_output, output in zip(_lowerCAmelCase , _lowerCAmelCase ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def UpperCAmelCase_ ( self ):
for model_class_name in self.all_model_classes:
_SCREAMING_SNAKE_CASE : List[Any] = model_class_name.from_pretrained("""facebook/blenderbot_small-90M""" )
# FlaxBlenderbotForSequenceClassification expects eos token in input_ids
_SCREAMING_SNAKE_CASE : Optional[int] = np.ones((1, 1) ) * model.config.eos_token_id
_SCREAMING_SNAKE_CASE : Union[str, Any] = model(_lowerCAmelCase )
self.assertIsNotNone(_lowerCAmelCase )
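# Illustrative sketch (added, not part of the original record): a minimal NumPy
# version of the `shift_tokens_right` behavior checked earlier in this file.
# The function name, and mapping -100 label padding back to `pad_token_id`, are
# assumptions based on the usual Flax seq2seq utilities.
import numpy as np

def shift_tokens_right_sketch(input_ids: np.ndarray, pad_token_id: int, decoder_start_token_id: int) -> np.ndarray:
    shifted = np.zeros_like(input_ids)
    shifted[:, 1:] = input_ids[:, :-1]      # every token moves one slot to the right
    shifted[:, 0] = decoder_start_token_id  # the decoder always starts from the start token
    return np.where(shifted == -100, pad_token_id, shifted)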
| 533 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : int = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Union[str, Any]:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Union[str, Any]:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : int = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Any:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : List[Any] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Any:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[Any]:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : List[Any] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[int]:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[Any]:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[str]:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : Dict = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> int:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[str]:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : List[Any] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[str]:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> str:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[str]:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : int = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Any:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> str:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[Any]:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : Optional[int] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Tuple:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[Any]:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : Union[str, Any] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> int:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[str]:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : Optional[int] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[int]:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[int]:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : Union[str, Any] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> str:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Union[str, Any]:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> int:
requires_backends(cls , ["torch"] )
def __a(*SCREAMING_SNAKE_CASE_ : Optional[Any] , **SCREAMING_SNAKE_CASE_ : int ):
'''simple docstring'''
requires_backends(SCREAMING_SNAKE_CASE_ , ["torch"] )
def __a(*SCREAMING_SNAKE_CASE_ : str , **SCREAMING_SNAKE_CASE_ : str ):
'''simple docstring'''
requires_backends(SCREAMING_SNAKE_CASE_ , ["torch"] )
def __a(*SCREAMING_SNAKE_CASE_ : str , **SCREAMING_SNAKE_CASE_ : List[Any] ):
'''simple docstring'''
requires_backends(SCREAMING_SNAKE_CASE_ , ["torch"] )
def __a(*SCREAMING_SNAKE_CASE_ : Optional[Any] , **SCREAMING_SNAKE_CASE_ : List[str] ):
'''simple docstring'''
requires_backends(SCREAMING_SNAKE_CASE_ , ["torch"] )
def __a(*SCREAMING_SNAKE_CASE_ : List[Any] , **SCREAMING_SNAKE_CASE_ : List[str] ):
'''simple docstring'''
requires_backends(SCREAMING_SNAKE_CASE_ , ["torch"] )
def __a(*SCREAMING_SNAKE_CASE_ : Tuple , **SCREAMING_SNAKE_CASE_ : str ):
'''simple docstring'''
requires_backends(SCREAMING_SNAKE_CASE_ , ["torch"] )
def __a(*SCREAMING_SNAKE_CASE_ : int , **SCREAMING_SNAKE_CASE_ : Dict ):
'''simple docstring'''
requires_backends(SCREAMING_SNAKE_CASE_ , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : Dict = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Dict:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Dict:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : Union[str, Any] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> str:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Tuple:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[Any]:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : Optional[Any] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Tuple:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[Any]:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : List[Any] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Tuple:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Union[str, Any]:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> int:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : List[Any] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> int:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[int]:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : Any = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[int]:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[int]:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[int]:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : Optional[int] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Dict:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[str]:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Tuple:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : List[str] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[Any]:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[str]:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Any:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : Any = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> int:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> int:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Dict:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : List[str] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[int]:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Any:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : List[str] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> int:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : Optional[int] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> str:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Dict:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : Union[str, Any] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Any:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> str:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Dict:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : List[Any] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Any:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Tuple:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Union[str, Any]:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : Optional[int] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> int:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> int:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[str]:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : str = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> int:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[int]:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[Any]:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : Tuple = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> str:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[str]:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : Union[str, Any] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> str:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Union[str, Any]:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Tuple:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : List[str] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[int]:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> str:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : Any = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> str:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[str]:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> int:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : Union[str, Any] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[str]:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Any:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Dict:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : Union[str, Any] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> int:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Any:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> str:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : int = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Union[str, Any]:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Tuple:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : Tuple = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[Any]:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Dict:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> int:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : str = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[Any]:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[int]:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[int]:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : Dict = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> str:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Any:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Tuple:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : Any = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> int:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Tuple:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : List[str] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : int = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Union[str, Any]:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Dict:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : Dict = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Any:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[Any]:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Dict:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : str = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[Any]:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Tuple:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[Any]:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : int = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Union[str, Any]:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[str]:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[Any]:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : List[Any] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[Any]:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[Any]:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Dict:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : List[Any] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Union[str, Any]:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> str:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Union[str, Any]:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : List[str] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[int]:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Union[str, Any]:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : Tuple = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Dict:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Tuple:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : Union[str, Any] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> str:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Any:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Union[str, Any]:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : str = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[int]:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Tuple:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> str:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : Union[str, Any] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Union[str, Any]:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> str:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Dict:
requires_backends(cls , ["torch"] )
| 18 | 0 |
"""simple docstring"""
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel
from diffusers import DDIMScheduler, LDMPipeline, UNetaDModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class __UpperCamelCase ( unittest.TestCase ):
@property
def __a ( self ) -> Optional[Any]:
torch.manual_seed(0 )
a : List[Any] = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("DownBlock2D", "AttnDownBlock2D") , up_block_types=("AttnUpBlock2D", "UpBlock2D") , )
return model
@property
def __a ( self ) -> Optional[int]:
torch.manual_seed(0 )
a : int = VQModel(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=3 , )
return model
@property
def __a ( self ) -> Any:
torch.manual_seed(0 )
a : int = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModel(_lowerCAmelCase )
def __a ( self ) -> List[Any]:
a : Any = self.dummy_uncond_unet
a : List[str] = DDIMScheduler()
a : Tuple = self.dummy_vq_model
a : str = LDMPipeline(unet=_lowerCAmelCase , vqvae=_lowerCAmelCase , scheduler=_lowerCAmelCase )
ldm.to(_lowerCAmelCase )
ldm.set_progress_bar_config(disable=_lowerCAmelCase )
a : Optional[int] = torch.manual_seed(0 )
a : Optional[Any] = ldm(generator=_lowerCAmelCase , num_inference_steps=2 , output_type="numpy" ).images
a : List[str] = torch.manual_seed(0 )
a : Union[str, Any] = ldm(generator=_lowerCAmelCase , num_inference_steps=2 , output_type="numpy" , return_dict=_lowerCAmelCase )[0]
a : Tuple = image[0, -3:, -3:, -1]
a : Optional[int] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
a : Optional[int] = np.array([0.8_512, 0.818, 0.6_411, 0.6_808, 0.4_465, 0.5_618, 0.46, 0.6_231, 0.5_172] )
a : Optional[Any] = 1E-2 if torch_device != "mps" else 3E-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < tolerance
@slow
@require_torch
class __UpperCamelCase ( unittest.TestCase ):
def __a ( self ) -> List[str]:
a : str = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256" )
ldm.to(_lowerCAmelCase )
ldm.set_progress_bar_config(disable=_lowerCAmelCase )
a : List[str] = torch.manual_seed(0 )
a : Tuple = ldm(generator=_lowerCAmelCase , num_inference_steps=5 , output_type="numpy" ).images
a : Union[str, Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
a : Dict = np.array([0.4_399, 0.44_975, 0.46_825, 0.474, 0.4_359, 0.4_581, 0.45_095, 0.4_341, 0.4_447] )
a : Optional[Any] = 1E-2 if torch_device != "mps" else 3E-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
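# Illustrative usage sketch (added): running the unconditional LDM pipeline the
# slow test above exercises. The checkpoint name comes from the test itself;
# the step count and output filename are arbitrary choices.
from diffusers import LDMPipeline

pipe = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256")
image = pipe(num_inference_steps=50).images[0]  # PIL image by default
image.save("ldm_sample.png")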
| 633 |
'''simple docstring'''
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
def get_job_links(workflow_run_id, token=None):
    """Extract job names and their links from a GitHub Actions workflow run."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}
    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
    result = requests.get(url, headers=headers).json()
    job_links = {}
    try:
        job_links.update({job["name"]: job["html_url"] for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)
        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            job_links.update({job["name"]: job["html_url"] for job in result["jobs"]})
        return job_links
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")
    return {}
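# Optional refactor sketch (an assumption, not part of the original script): the
# same pagination pattern, written as a page-until-empty loop instead of
# precomputing the page count from `total_count`. `key` is "jobs" or "artifacts".
def fetch_all_pages(url, headers, key):
    items = []
    page = 1
    while True:
        payload = requests.get(f"{url}&page={page}", headers=headers).json()
        batch = payload.get(key, [])
        if not batch:
            break
        items.extend(batch)
        page += 1
    return items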
def get_artifacts_links(workflow_run_id, token=None):
    """Get all artifact links from a GitHub Actions workflow run."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}
    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/artifacts?per_page=100"
    result = requests.get(url, headers=headers).json()
    artifacts = {}
    try:
        artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)
        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]})
        return artifacts
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")
    return {}
def download_artifact(artifact_name, artifact_url, output_dir, token):
    """Download a GitHub Actions artifact by following its redirect URL."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}
    result = requests.get(artifact_url, headers=headers, allow_redirects=False)
    download_url = result.headers["Location"]
    response = requests.get(download_url, allow_redirects=True)
    file_path = os.path.join(output_dir, f"{artifact_name}.zip")
    with open(file_path, "wb") as fp:
        fp.write(response.content)
def get_errors_from_single_artifact(artifact_zip_path, job_links=None):
    """Extract (error line, error, failed test, job link) tuples from a test report zip."""
    errors = []
    failed_tests = []
    job_name = None
    with zipfile.ZipFile(artifact_zip_path) as z:
        for filename in z.namelist():
            if not os.path.isdir(filename):
                # read the file
                if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
                    with z.open(filename) as f:
                        for line in f:
                            line = line.decode("UTF-8").strip()
                            if filename == "failures_line.txt":
                                try:
                                    # `error_line` is the place where `error` occurs
                                    error_line = line[: line.index(": ")]
                                    error = line[line.index(": ") + len(": ") :]
                                    errors.append([error_line, error])
                                except Exception:
                                    # skip un-related lines
                                    pass
                            elif filename == "summary_short.txt" and line.startswith("FAILED "):
                                # `test` is the test method that failed
                                test = line[len("FAILED ") :]
                                failed_tests.append(test)
                            elif filename == "job_name.txt":
                                job_name = line
    if len(errors) != len(failed_tests):
        raise ValueError(
            f"`errors` and `failed_tests` should have the same number of elements. Got {len(errors)} for `errors` "
            f"and {len(failed_tests)} for `failed_tests` instead. The test reports in {artifact_zip_path} have some"
            " problem."
        )
    job_link = None
    if job_name and job_links:
        job_link = job_links.get(job_name, None)
    # A list with elements of the form (line of error, error, failed test)
    result = [x + [y] + [job_link] for x, y in zip(errors, failed_tests)]
    return result
def get_all_errors(artifact_dir, job_links=None):
    """Collect errors from all downloaded test report zips in `artifact_dir`."""
    errors = []
    paths = [os.path.join(artifact_dir, p) for p in os.listdir(artifact_dir) if p.endswith(".zip")]
    for p in paths:
        errors.extend(get_errors_from_single_artifact(p, job_links=job_links))
    return errors
def reduce_by_error(logs, error_filter=None):
    """Group the logs by error message, sorted by descending occurrence count."""
    counter = Counter()
    counter.update([x[1] for x in logs])
    counts = counter.most_common()
    r = {}
    for error, count in counts:
        if error_filter is None or error not in error_filter:
            r[error] = {"count": count, "failed_tests": [(x[2], x[0]) for x in logs if x[1] == error]}
    r = dict(sorted(r.items(), key=lambda item: item[1]["count"], reverse=True))
    return r
def get_model(test):
    """Get the model name from a test method path like `tests/models/albert/...`."""
    test = test.split("::")[0]
    if test.startswith("tests/models/"):
        test = test.split("/")[2]
    else:
        test = None
    return test
def reduce_by_model(logs, error_filter=None):
    """Group the logs by model name, sorted by descending total error count."""
    logs = [(x[0], x[1], get_model(x[2])) for x in logs]
    logs = [x for x in logs if x[2] is not None]
    tests = {x[2] for x in logs}
    r = {}
    for test in tests:
        counter = Counter()
        # count by errors in `test`
        counter.update([x[1] for x in logs if x[2] == test])
        counts = counter.most_common()
        error_counts = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
        n_errors = sum(error_counts.values())
        if n_errors > 0:
            r[test] = {"count": n_errors, "errors": error_counts}
    r = dict(sorted(r.items(), key=lambda item: item[1]["count"], reverse=True))
    return r
def make_github_table(reduced_by_error):
    """Render the per-error counts as a GitHub-flavored markdown table."""
    header = "| no. | error | status |"
    sep = "|-:|:-|:-|"
    lines = [header, sep]
    for error in reduced_by_error:
        count = reduced_by_error[error]["count"]
        line = f"| {count} | {error[:100]} | |"
        lines.append(line)
    return "\n".join(lines)
def make_github_table_per_model(reduced_by_model):
    """Render the per-model counts and their most common error as a markdown table."""
    header = "| model | no. of errors | major error | count |"
    sep = "|-:|-:|-:|-:|"
    lines = [header, sep]
    for model in reduced_by_model:
        count = reduced_by_model[model]["count"]
        error, _count = list(reduced_by_model[model]["errors"].items())[0]
        line = f"| {model} | {count} | {error[:60]} | {_count} |"
        lines.append(line)
    return "\n".join(lines)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
    parser.add_argument(
        "--output_dir",
        type=str,
        required=True,
        help="Where to store the downloaded artifacts and other result files.",
    )
    parser.add_argument("--token", default=None, type=str, help="A token that has actions:read permission.")
    args = parser.parse_args()

    os.makedirs(args.output_dir, exist_ok=True)

    _job_links = get_job_links(args.workflow_run_id, token=args.token)
    job_links = {}
    # To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
    # For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
    if _job_links:
        for k, v in _job_links.items():
            # This is how GitHub Actions combines job names.
            if " / " in k:
                index = k.find(" / ")
                k = k[index + len(" / ") :]
            job_links[k] = v
    with open(os.path.join(args.output_dir, "job_links.json"), "w", encoding="UTF-8") as fp:
        json.dump(job_links, fp, ensure_ascii=False, indent=4)

    artifacts = get_artifacts_links(args.workflow_run_id, token=args.token)
    with open(os.path.join(args.output_dir, "artifacts.json"), "w", encoding="UTF-8") as fp:
        json.dump(artifacts, fp, ensure_ascii=False, indent=4)

    for idx, (name, url) in enumerate(artifacts.items()):
        download_artifact(name, url, args.output_dir, args.token)
        # Be gentle to GitHub
        time.sleep(1)

    errors = get_all_errors(args.output_dir, job_links=job_links)

    # `e[1]` is the error
    counter = Counter()
    counter.update([e[1] for e in errors])

    # print the top 30 most common test errors
    most_common = counter.most_common(30)
    for item in most_common:
        print(item)

    with open(os.path.join(args.output_dir, "errors.json"), "w", encoding="UTF-8") as fp:
        json.dump(errors, fp, ensure_ascii=False, indent=4)

    reduced_by_error = reduce_by_error(errors)
    reduced_by_model = reduce_by_model(errors)

    s1 = make_github_table(reduced_by_error)
    s2 = make_github_table_per_model(reduced_by_model)

    with open(os.path.join(args.output_dir, "reduced_by_error.txt"), "w", encoding="UTF-8") as fp:
        fp.write(s1)
    with open(os.path.join(args.output_dir, "reduced_by_model.txt"), "w", encoding="UTF-8") as fp:
        fp.write(s2)
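# Hypothetical invocation, assuming this file is saved as
# `get_ci_error_statistics.py` (the run id and token below are placeholders):
#
#   python get_ci_error_statistics.py \
#       --workflow_run_id 3412345678 \
#       --output_dir ci_artifacts \
#       --token "$GITHUB_TOKEN"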
| 18 | 0 |
'''simple docstring'''
import timeit
import numpy as np
import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD
def get_duration(func):
    """Decorator that makes `func` return its wall-clock duration instead of its result."""

    def wrapper(*args, **kwargs):
        starttime = timeit.default_timer()
        _ = func(*args, **kwargs)
        delta = timeit.default_timer() - starttime
        return delta

    wrapper.__name__ = func.__name__
    return wrapper


def generate_examples(features: dict, num_examples=100, seq_shapes=None):
    dummy_data = []
    seq_shapes = seq_shapes or {}
    for i in range(num_examples):
        example = {}
        for col_id, (k, v) in enumerate(features.items()):
            if isinstance(v, _ArrayXD):
                data = np.random.rand(*v.shape).astype(v.dtype)
            elif isinstance(v, datasets.Value):
                if v.dtype == "string":
                    data = "The small grey turtle was surprisingly fast when challenged."
                else:
                    data = np.random.randint(10, size=1).astype(v.dtype).item()
            elif isinstance(v, datasets.Sequence):
                while isinstance(v, datasets.Sequence):
                    v = v.feature
                shape = seq_shapes[k]
                data = np.random.rand(*shape).astype(v.dtype)
            example[k] = data
        dummy_data.append((i, example))
    return dummy_data


def generate_example_dataset(dataset_path, features, num_examples=100, seq_shapes=None):
    dummy_data = generate_examples(features, num_examples=num_examples, seq_shapes=seq_shapes)
    with ArrowWriter(features=features, path=dataset_path) as writer:
        for key, record in dummy_data:
            example = features.encode_example(record)
            writer.write(example)
        num_final_examples, num_bytes = writer.finalize()
    if not num_final_examples == num_examples:
        raise ValueError(
            f"Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}."
        )
    dataset = datasets.Dataset.from_file(filename=dataset_path, info=datasets.DatasetInfo(features=features))
    return dataset
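# Hedged usage sketch (added): the feature spec, path, and sequence shape below
# are illustrative choices, not taken from the original benchmark.
if __name__ == "__main__":
    feats = datasets.Features(
        {"text": datasets.Value("string"), "vec": datasets.Sequence(datasets.Value("float32"))}
    )
    ds = generate_example_dataset(
        "/tmp/bench.arrow", feats, num_examples=10, seq_shapes={"vec": (8,)}
    )
    print(ds)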
| 694 |
'''simple docstring'''
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class lowerCAmelCase_ ( __magic_name__ ):
__lowerCamelCase : List[str] = (DPMSolverSinglestepScheduler,)
__lowerCamelCase : int = (("num_inference_steps", 25),)
def _snake_case ( self , **_lowerCAmelCase ) -> Any:
_lowerCAmelCase = {
"num_train_timesteps": 1000,
"beta_start": 0.0001,
"beta_end": 0.02,
"beta_schedule": "linear",
"solver_order": 2,
"prediction_type": "epsilon",
"thresholding": False,
"sample_max_value": 1.0,
"algorithm_type": "dpmsolver++",
"solver_type": "midpoint",
"lambda_min_clipped": -float("inf" ),
"variance_type": None,
}
config.update(**_lowerCAmelCase )
return config
def _snake_case ( self , _lowerCAmelCase=0 , **_lowerCAmelCase ) -> List[Any]:
_lowerCAmelCase = dict(self.forward_default_kwargs )
_lowerCAmelCase = kwargs.pop("num_inference_steps" , _lowerCAmelCase )
_lowerCAmelCase = self.dummy_sample
_lowerCAmelCase = 0.1 * sample
_lowerCAmelCase = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
_lowerCAmelCase = self.get_scheduler_config(**_lowerCAmelCase )
_lowerCAmelCase = scheduler_class(**_lowerCAmelCase )
scheduler.set_timesteps(_lowerCAmelCase )
# copy over dummy past residuals
_lowerCAmelCase = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_lowerCAmelCase )
_lowerCAmelCase = scheduler_class.from_pretrained(_lowerCAmelCase )
new_scheduler.set_timesteps(_lowerCAmelCase )
# copy over dummy past residuals
_lowerCAmelCase = dummy_past_residuals[: new_scheduler.config.solver_order]
_lowerCAmelCase , _lowerCAmelCase = sample, sample
for t in range(_lowerCAmelCase , time_step + scheduler.config.solver_order + 1 ):
_lowerCAmelCase = scheduler.step(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , **_lowerCAmelCase ).prev_sample
_lowerCAmelCase = new_scheduler.step(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , **_lowerCAmelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def _snake_case ( self ) -> int:
pass
def _snake_case ( self , _lowerCAmelCase=0 , **_lowerCAmelCase ) -> Optional[int]:
_lowerCAmelCase = dict(self.forward_default_kwargs )
_lowerCAmelCase = kwargs.pop("num_inference_steps" , _lowerCAmelCase )
_lowerCAmelCase = self.dummy_sample
_lowerCAmelCase = 0.1 * sample
_lowerCAmelCase = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
_lowerCAmelCase = self.get_scheduler_config()
_lowerCAmelCase = scheduler_class(**_lowerCAmelCase )
scheduler.set_timesteps(_lowerCAmelCase )
# copy over dummy past residuals (must be after setting timesteps)
_lowerCAmelCase = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_lowerCAmelCase )
_lowerCAmelCase = scheduler_class.from_pretrained(_lowerCAmelCase )
# copy over dummy past residuals
new_scheduler.set_timesteps(_lowerCAmelCase )
# copy over dummy past residual (must be after setting timesteps)
_lowerCAmelCase = dummy_past_residuals[: new_scheduler.config.solver_order]
_lowerCAmelCase = scheduler.step(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , **_lowerCAmelCase ).prev_sample
_lowerCAmelCase = new_scheduler.step(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , **_lowerCAmelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def _snake_case ( self , _lowerCAmelCase=None , **_lowerCAmelCase ) -> Tuple:
if scheduler is None:
_lowerCAmelCase = self.scheduler_classes[0]
_lowerCAmelCase = self.get_scheduler_config(**_lowerCAmelCase )
_lowerCAmelCase = scheduler_class(**_lowerCAmelCase )
_lowerCAmelCase = self.scheduler_classes[0]
_lowerCAmelCase = self.get_scheduler_config(**_lowerCAmelCase )
_lowerCAmelCase = scheduler_class(**_lowerCAmelCase )
_lowerCAmelCase = 10
_lowerCAmelCase = self.dummy_model()
_lowerCAmelCase = self.dummy_sample_deter
scheduler.set_timesteps(_lowerCAmelCase )
for i, t in enumerate(scheduler.timesteps ):
_lowerCAmelCase = model(_lowerCAmelCase , _lowerCAmelCase )
_lowerCAmelCase = scheduler.step(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ).prev_sample
return sample
def _snake_case ( self ) -> Union[str, Any]:
_lowerCAmelCase = DPMSolverSinglestepScheduler(**self.get_scheduler_config() )
_lowerCAmelCase = 50
_lowerCAmelCase = self.dummy_model()
_lowerCAmelCase = self.dummy_sample_deter
scheduler.set_timesteps(_lowerCAmelCase )
# make sure that the first t is odd
for i, t in enumerate(scheduler.timesteps[3:] ):
_lowerCAmelCase = model(_lowerCAmelCase , _lowerCAmelCase )
_lowerCAmelCase = scheduler.step(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ).prev_sample
_lowerCAmelCase = torch.mean(torch.abs(_lowerCAmelCase ) )
assert abs(result_mean.item() - 0.2574 ) < 1E-3
def _snake_case ( self ) -> Optional[Any]:
for timesteps in [25, 50, 100, 999, 1000]:
self.check_over_configs(num_train_timesteps=_lowerCAmelCase )
def _snake_case ( self ) -> List[Any]:
# make sure that iterating over schedulers with same config names gives same results
# for defaults
_lowerCAmelCase = DPMSolverSinglestepScheduler(**self.get_scheduler_config() )
_lowerCAmelCase = self.full_loop(scheduler=_lowerCAmelCase )
_lowerCAmelCase = torch.mean(torch.abs(_lowerCAmelCase ) )
assert abs(result_mean.item() - 0.2791 ) < 1E-3
_lowerCAmelCase = DEISMultistepScheduler.from_config(scheduler.config )
_lowerCAmelCase = DPMSolverMultistepScheduler.from_config(scheduler.config )
_lowerCAmelCase = UniPCMultistepScheduler.from_config(scheduler.config )
_lowerCAmelCase = DPMSolverSinglestepScheduler.from_config(scheduler.config )
_lowerCAmelCase = self.full_loop(scheduler=_lowerCAmelCase )
_lowerCAmelCase = torch.mean(torch.abs(_lowerCAmelCase ) )
assert abs(result_mean.item() - 0.2791 ) < 1E-3
def _snake_case ( self ) -> str:
self.check_over_configs(thresholding=_lowerCAmelCase )
for order in [1, 2, 3]:
for solver_type in ["midpoint", "heun"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=_lowerCAmelCase , prediction_type=_lowerCAmelCase , sample_max_value=_lowerCAmelCase , algorithm_type="dpmsolver++" , solver_order=_lowerCAmelCase , solver_type=_lowerCAmelCase , )
def _snake_case ( self ) -> Dict:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=_lowerCAmelCase )
def _snake_case ( self ) -> Union[str, Any]:
for algorithm_type in ["dpmsolver", "dpmsolver++"]:
for solver_type in ["midpoint", "heun"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=_lowerCAmelCase , solver_type=_lowerCAmelCase , prediction_type=_lowerCAmelCase , algorithm_type=_lowerCAmelCase , )
_lowerCAmelCase = self.full_loop(
solver_order=_lowerCAmelCase , solver_type=_lowerCAmelCase , prediction_type=_lowerCAmelCase , algorithm_type=_lowerCAmelCase , )
assert not torch.isnan(_lowerCAmelCase ).any(), "Samples have nan numbers"
def _snake_case ( self ) -> Optional[Any]:
self.check_over_configs(lower_order_final=_lowerCAmelCase )
self.check_over_configs(lower_order_final=_lowerCAmelCase )
def _snake_case ( self ) -> Optional[Any]:
self.check_over_configs(lambda_min_clipped=-float("inf" ) )
self.check_over_configs(lambda_min_clipped=-5.1 )
def _snake_case ( self ) -> str:
self.check_over_configs(variance_type=_lowerCAmelCase )
self.check_over_configs(variance_type="learned_range" )
def _snake_case ( self ) -> int:
for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
self.check_over_forward(num_inference_steps=_lowerCAmelCase , time_step=0 )
def _snake_case ( self ) -> Any:
_lowerCAmelCase = self.full_loop()
_lowerCAmelCase = torch.mean(torch.abs(_lowerCAmelCase ) )
assert abs(result_mean.item() - 0.2791 ) < 1E-3
def _snake_case ( self ) -> List[str]:
_lowerCAmelCase = self.full_loop(use_karras_sigmas=_lowerCAmelCase )
_lowerCAmelCase = torch.mean(torch.abs(_lowerCAmelCase ) )
assert abs(result_mean.item() - 0.2248 ) < 1E-3
def _snake_case ( self ) -> Union[str, Any]:
_lowerCAmelCase = self.full_loop(prediction_type="v_prediction" )
_lowerCAmelCase = torch.mean(torch.abs(_lowerCAmelCase ) )
assert abs(result_mean.item() - 0.1453 ) < 1E-3
def _snake_case ( self ) -> Any:
_lowerCAmelCase = self.full_loop(prediction_type="v_prediction" , use_karras_sigmas=_lowerCAmelCase )
_lowerCAmelCase = torch.mean(torch.abs(_lowerCAmelCase ) )
assert abs(result_mean.item() - 0.0649 ) < 1E-3
def _snake_case ( self ) -> List[Any]:
_lowerCAmelCase = self.scheduler_classes[0]
_lowerCAmelCase = self.get_scheduler_config(thresholding=_lowerCAmelCase , dynamic_thresholding_ratio=0 )
_lowerCAmelCase = scheduler_class(**_lowerCAmelCase )
_lowerCAmelCase = 10
_lowerCAmelCase = self.dummy_model()
_lowerCAmelCase = self.dummy_sample_deter.half()
scheduler.set_timesteps(_lowerCAmelCase )
for i, t in enumerate(scheduler.timesteps ):
_lowerCAmelCase = model(_lowerCAmelCase , _lowerCAmelCase )
_lowerCAmelCase = scheduler.step(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ).prev_sample
assert sample.dtype == torch.float16
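# Hedged usage sketch (added): the scheduler interchangeability that the
# `from_config` round-trip test above relies on also works at pipeline level.
# The checkpoint name is an illustrative choice.
from diffusers import DiffusionPipeline, DPMSolverSinglestepScheduler

pipe = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
pipe.scheduler = DPMSolverSinglestepScheduler.from_config(pipe.scheduler.config)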
| 18 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_convbert": ["CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvBertConfig", "ConvBertOnnxConfig"],
"tokenization_convbert": ["ConvBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = ["ConvBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_convbert"] = [
"CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"ConvBertForMaskedLM",
"ConvBertForMultipleChoice",
"ConvBertForQuestionAnswering",
"ConvBertForSequenceClassification",
"ConvBertForTokenClassification",
"ConvBertLayer",
"ConvBertModel",
"ConvBertPreTrainedModel",
"load_tf_weights_in_convbert",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_convbert"] = [
"TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFConvBertForMaskedLM",
"TFConvBertForMultipleChoice",
"TFConvBertForQuestionAnswering",
"TFConvBertForSequenceClassification",
"TFConvBertForTokenClassification",
"TFConvBertLayer",
"TFConvBertModel",
"TFConvBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
from .tokenization_convbert import ConvBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_convbert_fast import ConvBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convbert import (
CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvBertForMaskedLM,
ConvBertForMultipleChoice,
ConvBertForQuestionAnswering,
ConvBertForSequenceClassification,
ConvBertForTokenClassification,
ConvBertLayer,
ConvBertModel,
ConvBertPreTrainedModel,
load_tf_weights_in_convbert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convbert import (
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertLayer,
TFConvBertModel,
TFConvBertPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
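
# Editor's note: a hedged, illustrative sketch of the lazy-import idea used
# above, kept as comments so it does not interfere with the _LazyModule swap.
# PEP 562 module-level __getattr__ gives the same effect; the module/attribute
# names below are placeholders, not part of transformers.
#
#     import importlib
#
#     _lazy_targets = {"dumps": "json", "sqrt": "math"}
#
#     def __getattr__(name):
#         if name in _lazy_targets:
#             return getattr(importlib.import_module(_lazy_targets[name]), name)
#         raise AttributeError(f"module {__name__!r} has no attribute {name!r}")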
| 346 |
'''simple docstring'''
from __future__ import annotations
def mean(nums: list) -> float:
    """Return the arithmetic mean of a list of numbers.

    >>> mean([3, 6, 9, 12, 15, 18, 21])
    12.0
    """
    if not nums:
        raise ValueError("List is empty")
    return sum(nums) / len(nums)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 18 | 0 |
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
class SafeDiffusionPipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)

        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image

    @property
    def dummy_cond_unet(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        return model

    @property
    def dummy_vae(self):
        torch.manual_seed(0)
        model = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        return model

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModel(config)

    @property
    def dummy_extractor(self):
        def extract(*args, **kwargs):
            class Out:
                def __init__(self):
                    self.pixel_values = torch.ones([0])

                def to(self, device):
                    self.pixel_values.to(device)
                    return self

            return Out()

        return extract
    def test_safe_diffusion_ddim(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False,
        )
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        sd_pipe = StableDiffusionPipeline(
            unet=unet, scheduler=scheduler, vae=vae, text_encoder=bert, tokenizer=tokenizer,
            safety_checker=None, feature_extractor=self.dummy_extractor,
        )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"

        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np")
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = sd_pipe(
            [prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np",
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.5756, 0.6118, 0.5005, 0.5041, 0.5471, 0.4726, 0.4976, 0.4865, 0.4864])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    def test_safe_diffusion_pndm(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        # make sure here that pndm scheduler skips prk
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        sd_pipe = StableDiffusionPipeline(
            unet=unet, scheduler=scheduler, vae=vae, text_encoder=bert, tokenizer=tokenizer,
            safety_checker=None, feature_extractor=self.dummy_extractor,
        )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"

        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np")
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = sd_pipe(
            [prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np",
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.5125, 0.5716, 0.4828, 0.5060, 0.5650, 0.4768, 0.5185, 0.4895, 0.4993])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
    def test_stable_diffusion_no_safety_checker(self):
        pipe = StableDiffusionPipeline.from_pretrained(
            "hf-internal-testing/tiny-stable-diffusion-lms-pipe", safety_checker=None
        )
        assert isinstance(pipe, StableDiffusionPipeline)
        assert isinstance(pipe.scheduler, LMSDiscreteScheduler)
        assert pipe.safety_checker is None

        image = pipe("example prompt", num_inference_steps=2).images[0]
        assert image is not None

        # check that there's no error when saving a pipeline with one of the models being None
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = StableDiffusionPipeline.from_pretrained(tmpdirname)

        # sanity check that the pipeline still works
        assert pipe.safety_checker is None
        image = pipe("example prompt", num_inference_steps=2).images[0]
        assert image is not None

    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_fp16(self):
        unet = self.dummy_cond_unet
        # make sure here that pndm scheduler skips prk
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        # put models in fp16
        unet = unet.half()
        vae = vae.half()
        bert = bert.half()

        sd_pipe = StableDiffusionPipeline(
            unet=unet, scheduler=scheduler, vae=vae, text_encoder=bert, tokenizer=tokenizer,
            safety_checker=None, feature_extractor=self.dummy_extractor,
        )
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        image = sd_pipe([prompt], num_inference_steps=2, output_type="np").images

        assert image.shape == (1, 64, 64, 3)
@nightly
@require_torch_gpu
class SafeDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_harm_safe_stable_diffusion(self):
        sd_pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", safety_checker=None)
        sd_pipe.scheduler = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = (
            "portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle"
            " coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with"
            " anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and"
            " children from bahnhof zoo, detailed "
        )
        seed = 4003660346
        guidance_scale = 7

        # without safety guidance (sld_guidance_scale = 0)
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt], generator=generator, guidance_scale=guidance_scale, num_inference_steps=50,
            output_type="np", width=512, height=512, sld_guidance_scale=0,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.2278, 0.2231, 0.2249, 0.2333, 0.2303, 0.1885, 0.2273, 0.2144, 0.2176]

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

        # with safety guidance (strong configuration)
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt], generator=generator, guidance_scale=guidance_scale, num_inference_steps=50,
            output_type="np", width=512, height=512, sld_guidance_scale=2000, sld_warmup_steps=7,
            sld_threshold=0.025, sld_momentum_scale=0.5, sld_mom_beta=0.7,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.2383, 0.2276, 0.236, 0.2192, 0.2186, 0.2053, 0.1971, 0.1901, 0.1719]

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_nudity_safe_stable_diffusion(self):
        sd_pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", safety_checker=None)
        sd_pipe.scheduler = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "padme amidala taking a bath artwork, safe for work, no nudity"
        seed = 2734971755
        guidance_scale = 7

        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt], generator=generator, guidance_scale=guidance_scale, num_inference_steps=50,
            output_type="np", width=512, height=512, sld_guidance_scale=0,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.3502, 0.3622, 0.3396, 0.3642, 0.3478, 0.3318, 0.35, 0.3348, 0.3297]

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt], generator=generator, guidance_scale=guidance_scale, num_inference_steps=50,
            output_type="np", width=512, height=512, sld_guidance_scale=2000, sld_warmup_steps=7,
            sld_threshold=0.025, sld_momentum_scale=0.5, sld_mom_beta=0.7,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.5531, 0.5206, 0.4895, 0.5156, 0.5182, 0.4751, 0.4802, 0.4803, 0.4443]

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_nudity_safetychecker_safe_stable_diffusion(self):
        sd_pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = (
            "the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c."
            " leyendecker"
        )
        seed = 1044355234
        guidance_scale = 12

        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt], generator=generator, guidance_scale=guidance_scale, num_inference_steps=50,
            output_type="np", width=512, height=512, sld_guidance_scale=0,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-7

        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt], generator=generator, guidance_scale=guidance_scale, num_inference_steps=50,
            output_type="np", width=512, height=512, sld_guidance_scale=2000, sld_warmup_steps=7,
            sld_threshold=0.025, sld_momentum_scale=0.5, sld_mom_beta=0.7,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5818, 0.6285, 0.6835, 0.6019, 0.625, 0.6754, 0.6096, 0.6334, 0.6561])

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
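
# Editor's note: illustrative usage of the safe pipeline exercised above, as a
# hedged sketch (kept as comments because it needs GPU weights). The `sld_*`
# arguments mirror the ones the nightly tests pass; model id and prompt are
# examples only.
#
#     pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
#     pipe = pipe.to("cuda")
#     image = pipe(
#         "a photo of an astronaut riding a horse",
#         sld_guidance_scale=2000,
#         sld_warmup_steps=7,
#         sld_threshold=0.025,
#         sld_momentum_scale=0.5,
#         sld_mom_beta=0.7,
#     ).images[0]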
| 148 |
'''simple docstring'''
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class StreamerTester(unittest.TestCase):
    def test_text_streamer_matches_non_streaming(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        greedy_text = tokenizer.decode(greedy_ids[0])

        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer)
            model.generate(input_ids, max_new_tokens=10, do_sample=False, streamer=streamer)
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]

        self.assertEqual(streamer_text, greedy_text)

    def test_iterator_streamer_matches_non_streaming(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        greedy_text = tokenizer.decode(greedy_ids[0])

        streamer = TextIteratorStreamer(tokenizer)
        generation_kwargs = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
        thread = Thread(target=model.generate, kwargs=generation_kwargs)
        thread.start()
        streamer_text = ""
        for new_text in streamer:
            streamer_text += new_text

        self.assertEqual(streamer_text, greedy_text)

    def test_text_streamer_skip_prompt(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        new_greedy_ids = greedy_ids[:, input_ids.shape[1] :]
        new_greedy_text = tokenizer.decode(new_greedy_ids[0])

        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer, skip_prompt=True)
            model.generate(input_ids, max_new_tokens=10, do_sample=False, streamer=streamer)
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]

        self.assertEqual(streamer_text, new_greedy_text)

    def test_text_streamer_decode_kwargs(self):
        # Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be
        # tested with actual models -- the dummy models' tokenizers are not aligned with their models, and
        # `skip_special_tokens=True` has no effect on them
        tokenizer = AutoTokenizer.from_pretrained("distilgpt2")
        model = AutoModelForCausalLM.from_pretrained("distilgpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = torch.ones((1, 5), device=torch_device).long() * model.config.bos_token_id
        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer, skip_special_tokens=True)
            model.generate(input_ids, max_new_tokens=1, do_sample=False, streamer=streamer)

        # The prompt contains a special token, so the streamer should not print it. As such, the output text, when
        # re-tokenized, must only contain one token
        streamer_text = cs.out[:-1]  # Remove the final "\n"
        streamer_text_tokenized = tokenizer(streamer_text, return_tensors="pt")
        self.assertEqual(streamer_text_tokenized.input_ids.shape, (1, 1))

    def test_iterator_streamer_timeout(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        streamer = TextIteratorStreamer(tokenizer, timeout=0.001)
        generation_kwargs = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
        thread = Thread(target=model.generate, kwargs=generation_kwargs)
        thread.start()

        # The streamer will timeout after 0.001 seconds, so an Empty exception will be raised
        with self.assertRaises(Empty):
            streamer_text = ""
            for new_text in streamer:
                streamer_text += new_text
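
# Editor's note: the pattern the iterator tests above rely on, as a hedged
# standalone sketch. Generation runs on a worker thread while the main thread
# consumes decoded text as it becomes available; the model id is an example.
def _example_streaming_generation():
    from threading import Thread

    from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

    tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
    model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")
    inputs = tokenizer(["A short prompt"], return_tensors="pt")

    streamer = TextIteratorStreamer(tokenizer)
    thread = Thread(target=model.generate, kwargs={**inputs, "max_new_tokens": 10, "streamer": streamer})
    thread.start()
    for chunk in streamer:  # yields decoded text incrementally
        print(chunk, end="")
    thread.join()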
| 18 | 0 |
'''simple docstring'''
from math import sqrt
def solution(limit: int = 1_000_000) -> int:
    num_cuboids: int = 0
    max_cuboid_size: int = 0
    sum_shortest_sides: int
    while num_cuboids <= limit:
        max_cuboid_size += 1
        for sum_shortest_sides in range(2, 2 * max_cuboid_size + 1):
            if sqrt(sum_shortest_sides**2 + max_cuboid_size**2).is_integer():
                num_cuboids += (
                    min(max_cuboid_size, sum_shortest_sides // 2)
                    - max(1, sum_shortest_sides - max_cuboid_size)
                    + 1
                )
    return max_cuboid_size
if __name__ == "__main__":
print(F'{solution() = }')
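
# Editor's note: a hedged brute-force cross-check of the counting step above,
# usable for small grids only. For a cuboid a x b x c with a <= b <= c, the
# shortest surface path is sqrt((a + b)^2 + c^2), so for fixed c = M and
# s = a + b the number of valid (a, b) pairs is min(M, s // 2) - max(1, s - M) + 1.
def _brute_force_count(max_size: int) -> int:
    from math import sqrt as _sqrt

    count = 0
    for c in range(1, max_size + 1):
        for b in range(1, c + 1):
            for a in range(1, b + 1):
                if _sqrt((a + b) ** 2 + c**2).is_integer():
                    count += 1
    return count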
| 50 |
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
logger = logging.get_logger(__name__)

BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json",
    # See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}


class BlenderbotSmallConfig(PretrainedConfig):
    model_type = "blenderbot-small"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(self, vocab_size=50265, max_position_embeddings=512, encoder_layers=8, encoder_ffn_dim=2048, encoder_attention_heads=16, decoder_layers=8, decoder_ffn_dim=2048, decoder_attention_heads=16, encoder_layerdrop=0.0, decoder_layerdrop=0.0, use_cache=True, is_encoder_decoder=True, activation_function="gelu", d_model=512, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, decoder_start_token_id=1, scale_embedding=False, pad_token_id=0, bos_token_id=1, eos_token_id=2, forced_eos_token_id=2, **kwargs):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )
class BlenderbotSmallOnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                common_inputs["decoder_input_ids"] = {0: "batch"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
            else:
                common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}

            if self.use_past:
                self.fill_with_past_key_values_(common_inputs, direction="inputs")
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_inputs[f"past_key_values.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_inputs[f"past_key_values.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        else:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                    ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
                    ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
                ]
            )

        return common_inputs

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast, self).outputs
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_outputs[f"present.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_outputs[f"present.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        return common_outputs

    def _generate_dummy_inputs_for_default_and_seq2seq_lm(self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None) -> Mapping[str, Any]:
        encoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, decoder_seq_length, is_pair, framework
        )
        decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs, **decoder_inputs)

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, encoder_seq_length = common_inputs["input_ids"].shape
            decoder_seq_length = common_inputs["decoder_input_ids"].shape[1]
            num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )

            common_inputs["decoder_attention_mask"] = torch.cat(
                [common_inputs["decoder_attention_mask"], torch.ones(batch, decoder_past_length)], dim=1
            )

            common_inputs["past_key_values"] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers, num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers, num_decoder_layers)
            max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
            remaining_side_name = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"

            for _ in range(min_num_layers):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(decoder_shape),
                        torch.zeros(decoder_shape),
                        torch.zeros(encoder_shape),
                        torch.zeros(encoder_shape),
                    )
                )
            # TODO: test this.
            shape = encoder_shape if remaining_side_name == "encoder" else decoder_shape
            for _ in range(min_num_layers, max_num_layers):
                common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape)))
        return common_inputs

    def _generate_dummy_inputs_for_causal_lm(self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None) -> Mapping[str, Any]:
        common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, seqlen = common_inputs["input_ids"].shape
            # Not using the same length for past_key_values
            past_key_values_length = seqlen + 2
            num_encoder_layers, _ = self.num_layers
            num_encoder_attention_heads, _ = self.num_attention_heads
            past_shape = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )

            mask_dtype = common_inputs["attention_mask"].dtype
            common_inputs["attention_mask"] = torch.cat(
                [common_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
            common_inputs["past_key_values"] = [
                (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers)
            ]
        return common_inputs

    def _generate_dummy_inputs_for_sequence_classification_and_question_answering(self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None) -> Mapping[str, Any]:
        # Copied from OnnxConfig.generate_dummy_inputs
        # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )

        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )

        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [" ".join([tokenizer.unk_token]) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input, return_tensors=framework))
        return common_inputs

    def generate_dummy_inputs(self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None) -> Mapping[str, Any]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        elif self.task == "causal-lm":
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        else:
            common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        return common_inputs

    def _flatten_past_key_values_(self, flattened_output, name, idx, t):
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t)
        else:
            flattened_output = super(OnnxSeq2SeqConfigWithPast, self)._flatten_past_key_values_(
                flattened_output, name, idx, t
            )
"""simple docstring"""
import warnings
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
from ...models import UNet2DModel
from ...schedulers import RePaintScheduler
from ...utils import PIL_INTERPOLATION, logging, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
def _preprocess_image(image):
    warnings.warn(
        "The preprocess method is deprecated and will be removed in a future version. Please"
        " use VaeImageProcessor.preprocess instead",
        FutureWarning,
    )
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]

    if isinstance(image[0], PIL.Image.Image):
        w, h = image[0].size
        w, h = (x - x % 8 for x in (w, h))  # resize to integer multiple of 8

        image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image]
        image = np.concatenate(image, axis=0)
        image = np.array(image).astype(np.float32) / 255.0
        image = image.transpose(0, 3, 1, 2)
        image = 2.0 * image - 1.0
        image = torch.from_numpy(image)
    elif isinstance(image[0], torch.Tensor):
        image = torch.cat(image, dim=0)
    return image


def _preprocess_mask(mask):
    if isinstance(mask, torch.Tensor):
        return mask
    elif isinstance(mask, PIL.Image.Image):
        mask = [mask]

    if isinstance(mask[0], PIL.Image.Image):
        w, h = mask[0].size
        w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
        mask = [np.array(m.convert("L").resize((w, h), resample=PIL_INTERPOLATION["nearest"]))[None, :] for m in mask]
        mask = np.concatenate(mask, axis=0)
        mask = mask.astype(np.float32) / 255.0
        mask[mask < 0.5] = 0
        mask[mask >= 0.5] = 1
        mask = torch.from_numpy(mask)
    elif isinstance(mask[0], torch.Tensor):
        mask = torch.cat(mask, dim=0)
    return mask
class RePaintPipeline(DiffusionPipeline):
    unet: UNet2DModel
    scheduler: RePaintScheduler

    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)
    @torch.no_grad()
    def __call__(self, image, mask_image, num_inference_steps=250, eta=0.0, jump_length=10, jump_n_sample=10, generator=None, output_type="pil", return_dict=True) -> Union[ImagePipelineOutput, Tuple]:
        original_image = image
        original_image = _preprocess_image(original_image)
        original_image = original_image.to(device=self.device, dtype=self.unet.dtype)
        mask_image = _preprocess_mask(mask_image)
        mask_image = mask_image.to(device=self.device, dtype=self.unet.dtype)

        batch_size = original_image.shape[0]

        # sample gaussian noise to begin the loop
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        image_shape = original_image.shape
        image = randn_tensor(image_shape, generator=generator, device=self.device, dtype=self.unet.dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps, jump_length, jump_n_sample, self.device)
        self.scheduler.eta = eta

        t_last = self.scheduler.timesteps[0] + 1
        generator = generator[0] if isinstance(generator, list) else generator
        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
            if t < t_last:
                # predict the noise residual
                model_output = self.unet(image, t).sample
                # compute previous image: x_t -> x_t-1
                image = self.scheduler.step(model_output, t, image, original_image, mask_image, generator).prev_sample
            else:
                # compute the reverse: x_t-1 -> x_t
                image = self.scheduler.undo_step(image, t_last, generator)
            t_last = t

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
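
# Editor's note: hedged usage sketch for the pipeline above, following the
# diffusers RePaint example; kept as comments because it downloads weights, and
# the checkpoint id is an example only.
#
#     from diffusers import RePaintPipeline, RePaintScheduler
#
#     scheduler = RePaintScheduler.from_pretrained("google/ddpm-ema-celebahq-256")
#     pipe = RePaintPipeline.from_pretrained("google/ddpm-ema-celebahq-256", scheduler=scheduler)
#     output = pipe(
#         image=original_image,    # PIL image to inpaint
#         mask_image=mask_image,   # binary mask selecting regions to keep vs. inpaint
#         num_inference_steps=250,
#         eta=0.0,
#         jump_length=10,
#         jump_n_sample=10,
#     )
#     inpainted = output.images[0]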
| 353 |
'''simple docstring'''
import re
import string
import numpy as np
import datasets
_SCREAMING_SNAKE_CASE = "\nReturns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.\n"
_SCREAMING_SNAKE_CASE = "\nArgs:\n predictions: List of predicted texts.\n references: List of reference texts.\n regexes_to_ignore: List, defaults to None. Regex expressions of characters to\n ignore when calculating the exact matches. Note: these regexes are removed\n from the input data before the changes based on the options below (e.g. ignore_case,\n ignore_punctuation, ignore_numbers) are applied.\n ignore_case: Boolean, defaults to False. If true, turns everything\n to lowercase so that capitalization differences are ignored.\n ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\n ignore_numbers: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\nReturns:\n exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.\nExamples:\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results[\"exact_match\"], 1))\n 25.0\n\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results[\"exact_match\"], 1))\n 50.0\n\n\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results[\"exact_match\"], 1))\n 75.0\n\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)\n >>> print(round(results[\"exact_match\"], 1))\n 100.0\n\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"The cat sat on the mat.\", \"Theaters are great.\", \"It's like comparing oranges and apples.\"]\n >>> preds = [\"The cat sat on the mat?\", \"Theaters are great.\", \"It's like comparing apples and oranges.\"]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results[\"exact_match\"], 1))\n 33.3\n\n"
_SCREAMING_SNAKE_CASE = "\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class ExactMatch(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            reference_urls=[],
        )

    def _compute(self, predictions, references, regexes_to_ignore=None, ignore_case=False, ignore_punctuation=False, ignore_numbers=False):
        if regexes_to_ignore is not None:
            for s in regexes_to_ignore:
                predictions = np.array([re.sub(s, "", x) for x in predictions])
                references = np.array([re.sub(s, "", x) for x in references])
        else:
            predictions = np.asarray(predictions)
            references = np.asarray(references)

        if ignore_case:
            predictions = np.char.lower(predictions)
            references = np.char.lower(references)

        if ignore_punctuation:
            repl_table = string.punctuation.maketrans("", "", string.punctuation)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        if ignore_numbers:
            repl_table = string.digits.maketrans("", "", string.digits)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        score_list = predictions == references
        return {"exact_match": np.mean(score_list) * 100}
| 18 | 0 |
"""simple docstring"""
import gc
import unittest
import torch
from parameterized import parameterized
from diffusers import AutoencoderKL
from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class AutoencoderKLTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = AutoencoderKL
    main_input_name = "sample"
    base_precision = 1e-2

    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        image = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        return {"sample": image}

    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_forward_signature(self):
        pass

    def test_training(self):
        pass

    @unittest.skipIf(torch_device == "mps", "Gradient checkpointing skipped on MPS")
    def test_gradient_checkpointing(self):
        # enable deterministic behavior for gradient checkpointing
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.model_class(**init_dict)
        model.to(torch_device)

        assert not model.is_gradient_checkpointing and model.training

        out = model(**inputs_dict).sample
        # run the backwards pass on the model. For backwards pass, for simplicity purpose,
        # we won't calculate the loss and rather backprop on out.sum()
        model.zero_grad()

        labels = torch.randn_like(out)
        loss = (out - labels).mean()
        loss.backward()

        # re-instantiate the model now enabling gradient checkpointing
        model_2 = self.model_class(**init_dict)
        # clone model
        model_2.load_state_dict(model.state_dict())
        model_2.to(torch_device)
        model_2.enable_gradient_checkpointing()

        assert model_2.is_gradient_checkpointing and model_2.training

        out_2 = model_2(**inputs_dict).sample
        # run the backwards pass on the model. For backwards pass, for simplicity purpose,
        # we won't calculate the loss and rather backprop on out.sum()
        model_2.zero_grad()
        loss_2 = (out_2 - labels).mean()
        loss_2.backward()

        # compare the output and parameters gradients
        self.assertTrue((loss - loss_2).abs() < 1e-5)

        named_params = dict(model.named_parameters())
        named_params_2 = dict(model_2.named_parameters())
        for name, param in named_params.items():
            self.assertTrue(torch_all_close(param.grad.data, named_params_2[name].grad.data, atol=5e-5))

    def test_from_pretrained_hub(self):
        model, loading_info = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy", output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        image = model(**self.dummy_input)

        assert image is not None, "Make sure output is not None"

    def test_output_pretrained(self):
        model = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy")
        model = model.to(torch_device)
        model.eval()

        if torch_device == "mps":
            generator = torch.manual_seed(0)
        else:
            generator = torch.Generator(device=torch_device).manual_seed(0)

        image = torch.randn(
            1,
            model.config.in_channels,
            model.config.sample_size,
            model.config.sample_size,
            generator=torch.manual_seed(0),
        )
        image = image.to(torch_device)
        with torch.no_grad():
            output = model(image, sample_posterior=True, generator=generator).sample

        output_slice = output[0, -1, -3:, -3:].flatten().cpu()

        # Since the VAE Gaussian prior's generator is seeded on the appropriate device,
        # the expected output slices are not the same for CPU and GPU.
        if torch_device == "mps":
            expected_output_slice = torch.tensor(
                [-4.0078e-01, -3.8323e-04, -1.2681e-01, -1.1462e-01, 2.0095e-01, 1.0893e-01, -8.8247e-02, -3.0361e-01, -9.8644e-03]
            )
        elif torch_device == "cpu":
            expected_output_slice = torch.tensor(
                [-0.1352, 0.0878, 0.0419, -0.0818, -0.1069, 0.0688, -0.1458, -0.4446, -0.0026]
            )
        else:
            expected_output_slice = torch.tensor(
                [-0.2421, 0.4642, 0.2507, -0.0438, 0.0682, 0.3160, -0.2018, -0.0727, 0.2485]
            )

        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))
@slow
class AutoencoderKLIntegrationTests(unittest.TestCase):
    def get_file_format(self, seed, shape):
        return f"gaussian_noise_s={seed}_shape={'_'.join([str(s) for s in shape])}.npy"

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_sd_image(self, seed=0, shape=(4, 3, 512, 512), fp16=False):
        dtype = torch.float16 if fp16 else torch.float32
        image = torch.from_numpy(load_hf_numpy(self.get_file_format(seed, shape))).to(torch_device).to(dtype)
        return image

    def get_sd_vae_model(self, model_id="CompVis/stable-diffusion-v1-4", fp16=False):
        revision = "fp16" if fp16 else None
        torch_dtype = torch.float16 if fp16 else torch.float32

        model = AutoencoderKL.from_pretrained(model_id, subfolder="vae", torch_dtype=torch_dtype, revision=revision)
        model.to(torch_device).eval()
        return model

    def get_generator(self, seed=0):
        if torch_device == "mps":
            return torch.manual_seed(seed)
        return torch.Generator(device=torch_device).manual_seed(seed)
@parameterized.expand(
[
# fmt: off
[33, [-0.1603, 0.9878, -0.0495, -0.0790, -0.2709, 0.8375, -0.2060, -0.0824], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]],
[47, [-0.2376, 0.1168, 0.1332, -0.4840, -0.2508, -0.0791, -0.0493, -0.4089], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]],
# fmt: on
] )
    def test_stable_diffusion(self, seed, expected_slice, expected_slice_mps):
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed)
        generator = self.get_generator(seed)

        with torch.no_grad():
            sample = model(image, generator=generator, sample_posterior=True).sample

        assert sample.shape == image.shape

        output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=3e-3)
@parameterized.expand(
[
# fmt: off
[33, [-0.0513, 0.0289, 1.3799, 0.2166, -0.2573, -0.0871, 0.5103, -0.0999]],
[47, [-0.4128, -0.1320, -0.3704, 0.1965, -0.4116, -0.2332, -0.3340, 0.2247]],
# fmt: on
] )
@require_torch_gpu
    def test_stable_diffusion_fp16(self, seed, expected_slice):
        model = self.get_sd_vae_model(fp16=True)
        image = self.get_sd_image(seed, fp16=True)
        generator = self.get_generator(seed)

        with torch.no_grad():
            sample = model(image, generator=generator, sample_posterior=True).sample

        assert sample.shape == image.shape

        output_slice = sample[-1, -2:, :2, -2:].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=1e-2)
@parameterized.expand(
[
# fmt: off
[33, [-0.1609, 0.9866, -0.0487, -0.0777, -0.2716, 0.8368, -0.2055, -0.0814], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]],
[47, [-0.2377, 0.1147, 0.1333, -0.4841, -0.2506, -0.0805, -0.0491, -0.4085], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]],
# fmt: on
] )
    def test_stable_diffusion_mode(self, seed, expected_slice, expected_slice_mps):
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed)

        with torch.no_grad():
            sample = model(image).sample

        assert sample.shape == image.shape

        output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=3e-3)
@parameterized.expand(
[
# fmt: off
[13, [-0.2051, -0.1803, -0.2311, -0.2114, -0.3292, -0.3574, -0.2953, -0.3323]],
[37, [-0.2632, -0.2625, -0.2199, -0.2741, -0.4539, -0.4990, -0.3720, -0.4925]],
# fmt: on
] )
@require_torch_gpu
    def test_stable_diffusion_decode(self, seed, expected_slice):
        model = self.get_sd_vae_model()
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64))

        with torch.no_grad():
            sample = model.decode(encoding).sample

        assert list(sample.shape) == [3, 3, 512, 512]

        output_slice = sample[-1, -2:, :2, -2:].flatten().cpu()
        expected_output_slice = torch.tensor(expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=1e-3)
@parameterized.expand(
[
# fmt: off
[27, [-0.0369, 0.0207, -0.0776, -0.0682, -0.1747, -0.1930, -0.1465, -0.2039]],
[16, [-0.1628, -0.2134, -0.2747, -0.2642, -0.3774, -0.4404, -0.3687, -0.4277]],
# fmt: on
] )
@require_torch_gpu
    def test_stable_diffusion_decode_fp16(self, seed, expected_slice):
        model = self.get_sd_vae_model(fp16=True)
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64), fp16=True)

        with torch.no_grad():
            sample = model.decode(encoding).sample

        assert list(sample.shape) == [3, 3, 512, 512]

        output_slice = sample[-1, -2:, :2, -2:].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=5e-3)
@parameterized.expand([(13,), (16,), (27,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason="xformers is not required when using PyTorch 2.0." )
    def test_stable_diffusion_decode_xformers_vs_2_0_fp16(self, seed):
        model = self.get_sd_vae_model(fp16=True)
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64), fp16=True)

        with torch.no_grad():
            sample = model.decode(encoding).sample

        model.enable_xformers_memory_efficient_attention()
        with torch.no_grad():
            sample_2 = model.decode(encoding).sample

        assert list(sample.shape) == [3, 3, 512, 512]

        assert torch_all_close(sample, sample_2, atol=1e-1)
@parameterized.expand([(13,), (16,), (37,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason="xformers is not required when using PyTorch 2.0." )
    def test_stable_diffusion_decode_xformers_vs_2_0(self, seed):
        model = self.get_sd_vae_model()
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64))

        with torch.no_grad():
            sample = model.decode(encoding).sample

        model.enable_xformers_memory_efficient_attention()
        with torch.no_grad():
            sample_2 = model.decode(encoding).sample

        assert list(sample.shape) == [3, 3, 512, 512]

        assert torch_all_close(sample, sample_2, atol=1e-2)
@parameterized.expand(
[
# fmt: off
[33, [-0.3001, 0.0918, -2.6984, -3.9720, -3.2099, -5.0353, 1.7338, -0.2065, 3.4267]],
[47, [-1.5030, -4.3871, -6.0355, -9.1157, -1.6661, -2.7853, 2.1607, -5.0823, 2.5633]],
# fmt: on
] )
    def test_stable_diffusion_encode_sample(self, seed, expected_slice):
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed)
        generator = self.get_generator(seed)

        with torch.no_grad():
            dist = model.encode(image).latent_dist
            sample = dist.sample(generator=generator)

        assert list(sample.shape) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]]

        output_slice = sample[0, -1, -3:, -3:].flatten().cpu()
        expected_output_slice = torch.tensor(expected_slice)

        tolerance = 3e-3 if torch_device != "mps" else 1e-2
        assert torch_all_close(output_slice, expected_output_slice, atol=tolerance)
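
# Editor's note: hedged sketch of the encode -> sample -> decode roundtrip the
# integration tests above slice into; kept as comments because it downloads
# weights, and the checkpoint id is an example.
#
#     vae = AutoencoderKL.from_pretrained("CompVis/stable-diffusion-v1-4", subfolder="vae")
#     image = torch.randn(1, 3, 512, 512)
#     with torch.no_grad():
#         latents = vae.encode(image).latent_dist.sample()   # (1, 4, 64, 64): 8x spatial downsampling
#         reconstruction = vae.decode(latents).sample        # back to (1, 3, 512, 512)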
| 528 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_yolos import YolosImageProcessor
logger = logging.get_logger(__name__)


class YolosFeatureExtractor(YolosImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use YolosImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
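
# Editor's note: the class above is the standard "deprecated alias" pattern; a
# hedged generic sketch (the helper and its names are ours, not transformers'):
def _make_deprecated_alias(new_cls, old_name):
    import warnings as _warnings

    class _Deprecated(new_cls):
        def __init__(self, *args, **kwargs):
            # warn once per instantiation, then behave exactly like the new class
            _warnings.warn(
                f"{old_name} is deprecated; use {new_cls.__name__} instead.",
                FutureWarning,
            )
            super().__init__(*args, **kwargs)

    _Deprecated.__name__ = old_name
    return _Deprecated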
| 18 | 0 |
import math
def perfect_square(num: int) -> bool:
    """Check if a number is a perfect square using math.sqrt.

    >>> perfect_square(9)
    True
    >>> perfect_square(10)
    False
    """
    return math.sqrt(num) * math.sqrt(num) == num


def perfect_square_binary_search(n: int) -> bool:
    """Check if a number is a perfect square using binary search.

    >>> perfect_square_binary_search(16)
    True
    >>> perfect_square_binary_search(10)
    False
    """
    left = 0
    right = n
    while left <= right:
        mid = (left + right) // 2
        if mid**2 == n:
            return True
        elif mid**2 > n:
            right = mid - 1
        else:
            left = mid + 1
    return False
if __name__ == "__main__":
import doctest
doctest.testmod()
| 412 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "tiiuae/falcon-40b": "https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json",
    "tiiuae/falcon-7b": "https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json",
}
class FalconConfig(PretrainedConfig):
    model_type = "falcon"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(self, vocab_size=65024, hidden_size=4544, num_hidden_layers=32, num_attention_heads=71, layer_norm_epsilon=1e-5, initializer_range=0.02, use_cache=True, hidden_dropout=0.0, attention_dropout=0.0, num_kv_heads=None, alibi=False, new_decoder_architecture=False, multi_query=True, parallel_attn=True, bias=False, bos_token_id=11, eos_token_id=11, **kwargs):
        self.vocab_size = vocab_size
        # Backward compatibility with n_embed kwarg
        n_embed = kwargs.pop("n_embed", None)
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.num_kv_heads = num_attention_heads if num_kv_heads is None else num_kv_heads
        self.alibi = alibi
        self.new_decoder_architecture = new_decoder_architecture
        self.multi_query = multi_query  # Ignored when new_decoder_architecture is True
        self.parallel_attn = parallel_attn
        self.bias = bias

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @property
    def head_dim(self):
        return self.hidden_size // self.num_attention_heads

    @property
    def rotary(self):
        return not self.alibi
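
# Editor's note: hedged usage sketch (not part of the original file); the
# helper name and argument values below are illustrative only.
def _example_falcon_config_usage():
    config = FalconConfig(num_kv_heads=8, new_decoder_architecture=True)
    # `head_dim` and `rotary` are the derived properties defined above
    return config.head_dim, config.rotary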
| 18 | 0 |
"""simple docstring"""
from typing import List, Optional, Union
import torch
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
EXAMPLE_DOC_STRING = """\n    Examples:\n    ```py\n    >>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline\n    >>> import torch\n\n    >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\"kandinsky-community/kandinsky-2-2-prior\")\n    >>> pipe_prior.to(\"cuda\")\n    >>> prompt = \"red cat, 4k photo\"\n    >>> out = pipe_prior(prompt)\n    >>> image_emb = out.image_embeds\n    >>> zero_image_emb = out.negative_image_embeds\n    >>> pipe = KandinskyV22Pipeline.from_pretrained(\"kandinsky-community/kandinsky-2-2-decoder\")\n    >>> pipe.to(\"cuda\")\n    >>> image = pipe(\n    ...     image_embeds=image_emb,\n    ...     negative_image_embeds=zero_image_emb,\n    ...     height=768,\n    ...     width=768,\n    ...     num_inference_steps=50,\n    ... ).images\n    >>> image[0].save(\"cat.png\")\n    ```\n"""
def downscale_height_and_width(height, width, scale_factor=8):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
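
# Worked example (editor's addition): with the default scale_factor of 8,
# height = 768 gives 768 // 8**2 = 12 full blocks and 12 * 8 = 96, so a
# (768, 768) request maps to a (96, 96) latent grid; 720 // 64 = 11 with a
# remainder, which the rounding above bumps back up to 12 blocks (96 again).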
class KandinskyV22Pipeline(DiffusionPipeline):
    def __init__(
        self,
        unet: UNet2DConditionModel,
        scheduler: DDPMScheduler,
        movq: VQModel,
    ):
        super().__init__()

        self.register_modules(
            unet=unet,
            scheduler=scheduler,
            movq=movq,
        )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)

    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)

        latents = latents * scheduler.init_noise_sigma
        return latents

    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f"cuda:{gpu_id}")

        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    def enable_model_cpu_offload(self, gpu_id=0):
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")

        device = torch.device(f"cuda:{gpu_id}")

        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)

        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)

        # We'll offload the last model manually.
        self.final_offload_hook = hook

    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device

    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        negative_image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 100,
        guidance_scale: float = 4.0,
        num_images_per_prompt: int = 1,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        device = self._execution_device

        do_classifier_free_guidance = guidance_scale > 1.0

        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        batch_size = image_embeds.shape[0] * num_images_per_prompt
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)

        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)

            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(
                dtype=self.unet.dtype, device=device
            )

        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps = self.scheduler.timesteps

        num_channels_latents = self.unet.config.in_channels

        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)

        # create initial latent
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width),
            image_embeds.dtype,
            device,
            generator,
            latents,
            self.scheduler,
        )

        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents

            added_cond_kwargs = {"image_embeds": image_embeds}
            noise_pred = self.unet(
                sample=latent_model_input,
                timestep=t,
                encoder_hidden_states=None,
                added_cond_kwargs=added_cond_kwargs,
                return_dict=False,
            )[0]

            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)

            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred,
                t,
                latents,
                generator=generator,
            )[0]

        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]

        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")

        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"facebook/deit-base-distilled-patch16-224": (
"https://huggingface.co/facebook/deit-base-patch16-224/resolve/main/config.json"
),
# See all DeiT models at https://huggingface.co/models?filter=deit
}
class DeiTConfig(PretrainedConfig):
    model_type = "deit"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        encoder_stride=16,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.encoder_stride = encoder_stride


class DeiTOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
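
# Editor's usage sketch (assumes the DeiTConfig / DeiTOnnxConfig classes
# defined just above, mirroring the transformers ONNX export API):
#     onnx_config = DeiTOnnxConfig(DeiTConfig())
#     onnx_config.inputs               # OrderedDict with one 4D "pixel_values" entry
#     onnx_config.atol_for_validation  # 1e-4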
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

# TODO Update this
ESM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''facebook/esm-1b''': '''https://huggingface.co/facebook/esm-1b/resolve/main/config.json''',
# See all ESM models at https://huggingface.co/models?filter=esm
}
class EsmConfig(PretrainedConfig):
    model_type = "esm"

    def __init__(
        self,
        vocab_size=None,
        mask_token_id=None,
        pad_token_id=None,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1026,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        position_embedding_type="absolute",
        use_cache=True,
        emb_layer_norm_before=None,
        token_dropout=False,
        is_folding_model=False,
        esmfold_config=None,
        vocab_list=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, mask_token_id=mask_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.emb_layer_norm_before = emb_layer_norm_before
        self.token_dropout = token_dropout
        self.is_folding_model = is_folding_model
        if is_folding_model:
            if esmfold_config is None:
                logger.info('No esmfold_config supplied for folding model, using default values.')
                esmfold_config = EsmFoldConfig()
            elif isinstance(esmfold_config, dict):
                esmfold_config = EsmFoldConfig(**esmfold_config)
            self.esmfold_config = esmfold_config
            if vocab_list is None:
                logger.warning('No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!')
                self.vocab_list = get_default_vocab_list()
            else:
                self.vocab_list = vocab_list
        else:
            self.esmfold_config = None
            self.vocab_list = None
        if self.esmfold_config is not None and getattr(self.esmfold_config, 'use_esm_attn_map', False):
            raise ValueError('The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!')

    def to_dict(self):
        output = super().to_dict()
        if isinstance(self.esmfold_config, EsmFoldConfig):
            output['esmfold_config'] = self.esmfold_config.to_dict()
        return output
@dataclass
class EsmFoldConfig:
    esm_type: str = None
    fp16_esm: bool = True
    use_esm_attn_map: bool = False
    esm_ablate_pairwise: bool = False
    esm_ablate_sequence: bool = False
    esm_input_dropout: float = 0

    embed_aa: bool = True
    bypass_lm: bool = False

    lddt_head_hid_dim: int = 128
    trunk: "TrunkConfig" = None

    def __post_init__(self):
        if self.trunk is None:
            self.trunk = TrunkConfig()
        elif isinstance(self.trunk, dict):
            self.trunk = TrunkConfig(**self.trunk)

    def to_dict(self):
        output = asdict(self)
        output["trunk"] = self.trunk.to_dict()
        return output
@dataclass
class TrunkConfig:
    num_blocks: int = 48
    sequence_state_dim: int = 1024
    pairwise_state_dim: int = 128
    sequence_head_width: int = 32
    pairwise_head_width: int = 32
    position_bins: int = 32
    dropout: float = 0
    layer_drop: float = 0
    cpu_grad_checkpoint: bool = False
    max_recycles: int = 4
    chunk_size: Optional[int] = 128
    structure_module: "StructureModuleConfig" = None

    def __post_init__(self):
        if self.structure_module is None:
            self.structure_module = StructureModuleConfig()
        elif isinstance(self.structure_module, dict):
            self.structure_module = StructureModuleConfig(**self.structure_module)

        if self.max_recycles <= 0:
            raise ValueError(f"`max_recycles` should be positive, got {self.max_recycles}.")
        if self.sequence_state_dim % self.sequence_head_width != 0:
            raise ValueError(
                "`sequence_state_dim` should be a round multiple of `sequence_head_width`, got"
                f" {self.sequence_state_dim} and {self.sequence_head_width}."
            )
        if self.pairwise_state_dim % self.pairwise_head_width != 0:
            raise ValueError(
                "`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got"
                f" {self.pairwise_state_dim} and {self.pairwise_head_width}."
            )

        sequence_num_heads = self.sequence_state_dim // self.sequence_head_width
        pairwise_num_heads = self.pairwise_state_dim // self.pairwise_head_width
        if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
            raise ValueError(
                "`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got"
                f" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}."
            )
        if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
            raise ValueError(
                "`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got"
                f" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}."
            )
        if self.pairwise_state_dim % 2 != 0:
            raise ValueError(f"`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.")

        if self.dropout >= 0.4:
            raise ValueError(f"`dropout` should not be greater than 0.4, got {self.dropout}.")

    def to_dict(self):
        output = asdict(self)
        output["structure_module"] = self.structure_module.to_dict()
        return output
@dataclass
class StructureModuleConfig:
    sequence_dim: int = 384
    pairwise_dim: int = 128
    ipa_dim: int = 16
    resnet_dim: int = 128
    num_heads_ipa: int = 12
    num_qk_points: int = 4
    num_v_points: int = 8
    dropout_rate: float = 0.1
    num_blocks: int = 8
    num_transition_layers: int = 1
    num_resnet_blocks: int = 2
    num_angles: int = 7
    trans_scale_factor: int = 10
    epsilon: float = 1e-8
    inf: float = 1e5

    def to_dict(self):
        return asdict(self)
def get_default_vocab_list():
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
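
# Editor's usage sketch (assumes the EsmConfig defaults reconstructed above):
#     config = EsmConfig(vocab_size=33, is_folding_model=True)
#     config.vocab_list == get_default_vocab_list()  # True: falls back to the ESM-2 vocabulary
#     config.esmfold_config.trunk.max_recycles       # 4, the TrunkConfig default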
'''simple docstring'''
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_mctct": ["MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MCTCTConfig"],
"feature_extraction_mctct": ["MCTCTFeatureExtractor"],
"processing_mctct": ["MCTCTProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mctct"] = [
"MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST",
"MCTCTForCTC",
"MCTCTModel",
"MCTCTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
from .feature_extraction_mctct import MCTCTFeatureExtractor
from .processing_mctct import MCTCTProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
import argparse
import json
import os
import torch
from torch import nn
from transformers import NllbMoeConfig, NllbMoeModel
from transformers.modeling_utils import dtype_byte_size
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
def remove_ignore_keys_(state_dict):
    ignore_keys = [
'''encoder.version''',
'''decoder.version''',
'''model.encoder.version''',
'''model.decoder.version''',
'''decoder.output_projection.weight''',
'''_float_tensor''',
'''encoder.embed_positions._float_tensor''',
'''decoder.embed_positions._float_tensor''',
]
for k in ignore_keys:
state_dict.pop(__snake_case, __snake_case )
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def rename_fairseq_keys(state_dict, expert_idx=None):
    new_dict = {}
    for old_key in state_dict.keys():
        key = old_key
        if "moe_layer.experts." in key:
            if expert_idx is not None:
                key = key.replace("moe_layer.experts.0", f"ffn.experts.expert_{expert_idx}")
            else:
                key = key.replace("moe_layer.experts.", "ffn.experts.expert_")
        if "gate" in key:
            key = key.replace(".moe_layer.gate.wg", ".ffn.router.classifier")
        # only rewrite the dense fc layers, not the per-expert ones
        if "fc2" in key and "experts" not in key:
            key = key.replace(".fc2.", ".ffn.fc2.")
        if "fc1" in key and "experts" not in key:
            key = key.replace(".fc1.", ".ffn.fc1.")
        if ".encoder_attn." in key:
            key = key.replace(".encoder_attn.", ".cross_attention.")
        if "encoder_attn_layer_norm" in key:
            key = key.replace("encoder_attn_layer_norm", "cross_attention_layer_norm")
        if "final_layer_norm" in key:
            key = key.replace("final_layer_norm", "ff_layer_norm")
        new_dict[key] = state_dict[old_key]
    return new_dict
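
# Illustrative renames produced by rename_fairseq_keys (editor's addition):
#   "encoder.layers.1.moe_layer.experts.0.fc1.weight"  (expert_idx=3)
#       -> "encoder.layers.1.ffn.experts.expert_3.fc1.weight"
#   "decoder.layers.0.moe_layer.gate.wg.weight"
#       -> "decoder.layers.0.ffn.router.classifier.weight"
#   "decoder.layers.0.encoder_attn_layer_norm.weight"
#       -> "decoder.layers.0.cross_attention_layer_norm.weight"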
def shard_on_the_fly(switch_checkpoint_path, dump_path, num_experts, dtype, weights_name: str = WEIGHTS_NAME):
    sharded_state_dicts = []
    total_size = 0
    os.makedirs(dump_path, exist_ok=True)

    for expert in range(num_experts):
        expert_path = switch_checkpoint_path + f"-rank-{expert}.pt"
        if os.path.isfile(expert_path):
            expert_state = torch.load(expert_path)["model"]
            remove_ignore_keys_(expert_state)
            expert_state = rename_fairseq_keys(expert_state, expert)
            save_path = os.path.join(
                dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin")
            )
            torch.save(expert_state, save_path)
            sharded_state_dicts.append(expert_state.keys())
            total_size += sum([value.numel() for key, value in expert_state.items()]) * dtype_byte_size(
                expert_state[list(expert_state)[0]].dtype
            )

    # Add the last block
    save_path = os.path.join(dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin"))
    shared_weights = torch.load(switch_checkpoint_path + "-shared.pt")["model"]
    remove_ignore_keys_(shared_weights)
    shared_weights = rename_fairseq_keys(shared_weights, None)
    shared_weights["shared.weight"] = shared_weights["decoder.embed_tokens.weight"]
    sharded_state_dicts.append(shared_weights.keys())

    # If we only have the shared weights (dummy model/experts saved on the same file)
    if len(sharded_state_dicts) == 1:
        save_path = os.path.join(dump_path, weights_name)
        torch.save(shared_weights, save_path)
        return {weights_name: sharded_state_dicts[0]}, None
    else:
        torch.save(shared_weights, save_path)
    # Otherwise, let's build the index
    weight_map = {}
    for idx, shard in enumerate(sharded_state_dicts):
        shard_file = weights_name.replace(".bin", f"-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.bin")
        temp_filename = os.path.join(dump_path, weights_name.replace(".bin", f"-{idx+1:05d}-of-???.bin"))
        os.rename(temp_filename, os.path.join(dump_path, shard_file))
        for key in shard:
            weight_map[key] = shard_file

    # Add the metadata
    metadata = {"total_size": total_size}
    index = {"metadata": metadata, "weight_map": weight_map}

    with open(os.path.join(dump_path, WEIGHTS_INDEX_NAME), "w", encoding="utf-8") as f:
        content = json.dumps(index, indent=2, sort_keys=True) + "\n"
        f.write(content)

    return metadata, index
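
# Shape of the index built above (editor's addition); this is the standard
# sharded-checkpoint index format that transformers loads:
#     {
#         "metadata": {"total_size": <total bytes>},
#         "weight_map": {"<parameter name>": "pytorch_model-00001-of-000NN.bin", ...}
#     }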
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--nllb_moe_checkpoint_path""",
default="""/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000""",
type=str,
required=False,
help="""Path to a directory containing a folder per layer. Follows the original Google format.""",
)
parser.add_argument("""--dtype""", default="""float32""", type=str, required=False, help="""dtype of the saved model""")
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b""",
type=str,
required=False,
help="""Path to the output pytorch model.""",
)
    args = parser.parse_args()
    metadata, index = shard_on_the_fly(
args.nllb_moe_checkpoint_path,
args.pytorch_dump_folder_path,
128,
args.dtype,
)
    config = NllbMoeConfig.from_pretrained(
"""facebook/nllb-200-3.3B""", encoder_sparse_step=4, decoder_sparse_step=4, num_experts=128
)
config.save_pretrained(args.pytorch_dump_folder_path)
    model = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path)
print("""Done""")
model.save_pretrained(args.pytorch_dump_folder_path)
"""simple docstring"""
import argparse
import json
from typing import List
from ltp import LTP
from transformers.models.bert.tokenization_bert import BertTokenizer
def _is_chinese_char(cp):
    """Checks whether cp is the codepoint of a CJK character."""
if (
(cp >= 0x4E00 and cp <= 0x9FFF)
or (cp >= 0x3400 and cp <= 0x4DBF) #
or (cp >= 0x2_0000 and cp <= 0x2_A6DF) #
or (cp >= 0x2_A700 and cp <= 0x2_B73F) #
or (cp >= 0x2_B740 and cp <= 0x2_B81F) #
or (cp >= 0x2_B820 and cp <= 0x2_CEAF) #
or (cp >= 0xF900 and cp <= 0xFAFF)
or (cp >= 0x2_F800 and cp <= 0x2_FA1F) #
): #
return True
return False
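
# Examples (editor's addition): ord("中") == 0x4E2D lies in the CJK Unified
# Ideographs block (0x4E00-0x9FFF), so it is treated as Chinese;
# ord("a") == 0x61 falls in none of the ranges above, so it is not.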
def is_chinese(word: str):
    for char in word:
        cp = ord(char)
        if not _is_chinese_char(cp):
            return 0
    return 1
def get_chinese_word(tokens: list):
    word_set = set()

    for token in tokens:
        chinese_word = len(token) > 1 and is_chinese(token)
        if chinese_word:
            word_set.add(token)
    word_list = list(word_set)
    return word_list
def add_sub_symbol(bert_tokens: list, chinese_word_set: set):
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w) for w in chinese_word_set])

    bert_word = bert_tokens
    start, end = 0, len(bert_word)
    while start < end:
        single_word = True
        if is_chinese(bert_word[start]):
            max_match = min(end - start, max_word_len)
            for i in range(max_match, 1, -1):
                whole_word = "".join(bert_word[start : start + i])
                if whole_word in chinese_word_set:
                    for j in range(start + 1, start + i):
                        bert_word[j] = "##" + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word
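
# Worked example (editor's addition): with chinese_word_set == {"你好"},
# add_sub_symbol(["你", "好", "吗"], chinese_word_set) marks the continuation
# character of the matched two-character word and returns ["你", "##好", "吗"].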
def prepare_ref(lines: List[str], ltp_tokenizer: LTP, bert_tokenizer: BertTokenizer):
    ltp_res = []
    for i in range(0, len(lines), 100):
        res = ltp_tokenizer.pipeline(lines[i : i + 100], tasks=["cws"]).cws
        res = [get_chinese_word(r) for r in res]
        ltp_res.extend(res)
    assert len(ltp_res) == len(lines)

    bert_res = []
    for i in range(0, len(lines), 100):
        res = bert_tokenizer(lines[i : i + 100], add_special_tokens=True, truncation=True, max_length=512)
        bert_res.extend(res["input_ids"])
    assert len(bert_res) == len(lines)

    ref_ids = []
    for input_ids, chinese_word in zip(bert_res, ltp_res):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id)
            input_tokens.append(token)
        input_tokens = add_sub_symbol(input_tokens, chinese_word)
        ref_id = []
        # We only save pos of chinese subwords start with ##, which mean is part of a whole word.
        for i, token in enumerate(input_tokens):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)):
                    ref_id.append(i)
        ref_ids.append(ref_id)
    assert len(ref_ids) == len(bert_res)
    return ref_ids
def main(args):
    with open(args.file_name, "r", encoding="utf-8") as f:
        data = f.readlines()
    data = [line.strip() for line in data if len(line) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    ltp_tokenizer = LTP(args.ltp)  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert)

    ref_ids = prepare_ref(data, ltp_tokenizer, bert_tokenizer)

    with open(args.save_path, "w", encoding="utf-8") as f:
        data = [json.dumps(ref) + "\n" for ref in ref_ids]
        f.writelines(data)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="prepare_chinese_ref")
parser.add_argument(
"""--file_name""",
required=False,
type=str,
default="""./resources/chinese-demo.txt""",
help="""file need process, same as training data in lm""",
)
parser.add_argument(
"""--ltp""",
required=False,
type=str,
default="""./resources/ltp""",
help="""resources for LTP tokenizer, usually a path""",
)
parser.add_argument(
"""--bert""",
required=False,
type=str,
default="""./resources/robert""",
help="""resources for Bert tokenizer""",
)
parser.add_argument(
"""--save_path""",
required=False,
type=str,
default="""./resources/ref.txt""",
help="""path to save res""",
)
    args = parser.parse_args()
main(args)
"""simple docstring"""
def ugly_numbers(n: int) -> int:
    """Return the nth ugly number (numbers whose only prime factors are 2, 3 and 5)."""
    ugly_nums = [1]

    i2, i3, i5 = 0, 0, 0
    next_2 = ugly_nums[i2] * 2
    next_3 = ugly_nums[i3] * 3
    next_5 = ugly_nums[i5] * 5

    for _ in range(1, n):
        next_num = min(next_2, next_3, next_5)
        ugly_nums.append(next_num)
        if next_num == next_2:
            i2 += 1
            next_2 = ugly_nums[i2] * 2
        if next_num == next_3:
            i3 += 1
            next_3 = ugly_nums[i3] * 3
        if next_num == next_5:
            i5 += 1
            next_5 = ugly_nums[i5] * 5
    return ugly_nums[-1]
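
# First ten terms produced by the three-pointer merge above (editor's addition):
# 1, 2, 3, 4, 5, 6, 8, 9, 10, 12, so ugly_numbers(10) == 12.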
if __name__ == "__main__":
from doctest import testmod
testmod(verbose=True)
print(F"""{ugly_numbers(200) = }""")
"""simple docstring"""
import heapq
def greedy_min_vertex_cover(graph: dict) -> set[int]:
    """Greedy APX-Algorithm for minimum vertex cover."""
    queue: list[list] = []

    # for each node and its adjacency list, add them and the rank of the node to the queue;
    # heapq implements a min priority queue, so -1 * len(v) turns it into a max priority queue
    for key, value in graph.items():
        # O(log(n))
        heapq.heappush(queue, [-1 * len(value), (key, value)])

    # chosen_vertices = set of chosen vertices
    chosen_vertices = set()

    # while queue isn't empty and there are still edges
    # (queue[0][0] is the rank of the node with max rank)
    while queue and queue[0][0] != 0:
        # extract vertex with max rank from queue and add it to chosen_vertices
        argmax = heapq.heappop(queue)[1][0]
        chosen_vertices.add(argmax)

        # Remove all arcs adjacent to argmax
        for elem in queue:
            # if v has no adjacent node, skip
            if elem[0] == 0:
                continue
            # if argmax is reachable from elem,
            # remove argmax from elem's adjacent list and update its rank
            if argmax in elem[1][1]:
                index = elem[1][1].index(argmax)
                del elem[1][1][index]
                elem[0] += 1
        # re-order the queue
        heapq.heapify(queue)
    return chosen_vertices
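
# Worked example (editor's addition) on the demo graph below: the degrees are
# {0: 2, 1: 2, 2: 3, 3: 3, 4: 2}; the heap pops node 2 first (maximum degree,
# smallest key on ties), then 0, 1 and 4, giving the cover {0, 1, 2, 4}.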
if __name__ == "__main__":
import doctest
doctest.testmod()
    graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
print(F"""Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}""")
"""simple docstring"""
import math
def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(nth: int = 10001) -> int:
    try:
        nth = int(nth)
    except (TypeError, ValueError):
        raise TypeError("Parameter nth must be int or castable to int.") from None
    if nth <= 0:
        raise ValueError("Parameter nth must be greater than or equal to one.")
    primes: list[int] = []
    num = 2
    while len(primes) < nth:
        if is_prime(num):
            primes.append(num)
            num += 1
        else:
            num += 1
    return primes[len(primes) - 1]
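
# Example (editor's addition): solution(6) collects 2, 3, 5, 7, 11, 13 and
# returns 13; the default nth of 10001 answers Project Euler problem 7.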
if __name__ == "__main__":
print(F"""{solution() = }""")
"""simple docstring"""
from datasets.utils.patching import _PatchedModuleObj, patch_submodule
from . import _test_patching
def test_patch_submodule():
import os as original_os
from os import path as original_path
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
    mock = "__test_patch_submodule_mock__"
    with patch_submodule(_test_patching, "os.path.join", mock):
# Every way to access os.path.join must be patched, and the rest must stay untouched
# check os.path.join
assert isinstance(_test_patching.os, _PatchedModuleObj )
assert isinstance(_test_patching.os.path, _PatchedModuleObj )
assert _test_patching.os.path.join is mock
# check path.join
assert isinstance(_test_patching.path, _PatchedModuleObj )
assert _test_patching.path.join is mock
# check join
assert _test_patching.join is mock
# check that the other attributes are untouched
assert _test_patching.os.rename is original_rename
assert _test_patching.path.dirname is original_dirname
assert _test_patching.os.path.dirname is original_dirname
# Even renamed modules or objects must be patched
# check renamed_os.path.join
assert isinstance(_test_patching.renamed_os, _PatchedModuleObj )
assert isinstance(_test_patching.renamed_os.path, _PatchedModuleObj )
assert _test_patching.renamed_os.path.join is mock
# check renamed_path.join
assert isinstance(_test_patching.renamed_path, _PatchedModuleObj )
assert _test_patching.renamed_path.join is mock
# check renamed_join
assert _test_patching.renamed_join is mock
# check that the other attributes are untouched
assert _test_patching.renamed_os.rename is original_rename
assert _test_patching.renamed_path.dirname is original_dirname
assert _test_patching.renamed_os.path.dirname is original_dirname
    # check that everything is back to normal when the patch is over
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
def test_patch_submodule_builtin():
    assert _test_patching.open is open

    mock = "__test_patch_submodule_builtin_mock__"
    # _test_patching has "open" in its globals
    assert _test_patching.open is open
    with patch_submodule(_test_patching, "open", mock):
        assert _test_patching.open is mock

    # check that everything is back to normal when the patch is over
    assert _test_patching.open is open
def test_patch_submodule_missing():
    mock = "__test_patch_submodule_missing_mock__"
    with patch_submodule(_test_patching, "pandas.read_csv", mock):
        pass
def test_patch_submodule_missing_builtin():
    mock = "__test_patch_submodule_missing_builtin_mock__"
    # _test_patching doesn't have "len" in its globals
    assert getattr(_test_patching, "len", None) is None
    with patch_submodule(_test_patching, "len", mock):
        assert _test_patching.len is mock
    assert _test_patching.len is len
def test_patch_submodule_start_and_stop():
    mock = "__test_patch_submodule_start_and_stop_mock__"
    patch = patch_submodule(_test_patching, "open", mock)
    assert _test_patching.open is open
    patch.start()
    assert _test_patching.open is mock
    patch.stop()
    assert _test_patching.open is open
def test_patch_submodule_successive():
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
    mock_join = "__test_patch_submodule_successive_join__"
    mock_dirname = "__test_patch_submodule_successive_dirname__"
    mock_rename = "__test_patch_submodule_successive_rename__"
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
    with patch_submodule(_test_patching, "os.path.join", mock_join):
        with patch_submodule(_test_patching, "os.rename", mock_rename):
            with patch_submodule(_test_patching, "os.path.dirname", mock_dirname):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
# try another order
    with patch_submodule(_test_patching, "os.rename", mock_rename):
        with patch_submodule(_test_patching, "os.path.join", mock_join):
            with patch_submodule(_test_patching, "os.path.dirname", mock_dirname):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
def test_patch_submodule_doesnt_exist():
    mock = "__test_patch_submodule_doesnt_exist_mock__"
    with patch_submodule(_test_patching, "__module_that_doesn_exist__.__attribute_that_doesn_exist__", mock):
        pass
    with patch_submodule(_test_patching, "os.__attribute_that_doesn_exist__", mock):
        pass
"""simple docstring"""
import argparse
import torch
from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = LxmertConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = LxmertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_lxmert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help="""The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.""",
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def replace_key_with_offset(key, offset, original_name, new_name):
    to_find = original_name.split(".")[0]
    key_list = key.split(".")
    orig_block_num = int(key_list[key_list.index(to_find) - 2])
    layer_num = int(key_list[key_list.index(to_find) - 1])
    new_block_num = orig_block_num - offset

    key = key.replace(f"{orig_block_num}.{layer_num}.{original_name}", f"block.{new_block_num}.{layer_num}.{new_name}")
    return key
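
# Worked example (editor's addition): with offset=1,
#     replace_key_with_offset("poolformer.encoder.2.3.mlp.fc1.weight", 1, "mlp.fc1", "output.conv1")
# locates block 2 / layer 3 in front of "mlp", shifts the block number by the
# offset, and returns "poolformer.encoder.block.1.3.output.conv1.weight".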
def rename_keys(state_dict):
    new_state_dict = OrderedDict()
    total_embed_found, patch_emb_offset = 0, 0
    for key, value in state_dict.items():
        if key.startswith("network"):
            key = key.replace("network", "poolformer.encoder")
        if "proj" in key:
            # Works for the first embedding as well as the internal embedding layers
            if key.endswith("bias") and "patch_embed" not in key:
                patch_emb_offset += 1
            to_replace = key[: key.find("proj")]
            key = key.replace(to_replace, f"patch_embeddings.{total_embed_found}.")
            key = key.replace("proj", "projection")
            if key.endswith("bias"):
                total_embed_found += 1
        if "patch_embeddings" in key:
            key = "poolformer.encoder." + key
        if "mlp.fc1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "mlp.fc1", "output.conv1")
        if "mlp.fc2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "mlp.fc2", "output.conv2")
        if "norm1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "norm1", "before_norm")
        if "norm2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "norm2", "after_norm")
        if "layer_scale_1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "layer_scale_1", "layer_scale_1")
        if "layer_scale_2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "layer_scale_2", "layer_scale_2")
        if "head" in key:
            key = key.replace("head", "classifier")
        new_state_dict[key] = value
    return new_state_dict
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image
@torch.no_grad()
def convert_poolformer_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path):
    config = PoolFormerConfig()

    # set attributes based on model_name
    repo_id = "huggingface/label-files"
    size = model_name[-3:]
    config.num_labels = 1000
    filename = "imagenet-1k-id2label.json"
    expected_shape = (1, 1000)

    # set config attributes
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    if size == "s12":
        config.depths = [2, 2, 6, 2]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s24":
        config.depths = [4, 4, 12, 4]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s36":
        config.depths = [6, 6, 18, 6]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.9
    elif size == "m36":
        config.depths = [6, 6, 18, 6]
        config.hidden_sizes = [96, 192, 384, 768]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.95
    elif size == "m48":
        config.depths = [8, 8, 24, 8]
        config.hidden_sizes = [96, 192, 384, 768]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.95
    else:
        raise ValueError(f"Size {size} not supported")

    # load image processor
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct)

    # Prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors="pt").pixel_values

    logger.info(f"Converting model {model_name}...")

    # load original state dict
    state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))

    # rename keys
    state_dict = rename_keys(state_dict)

    # create HuggingFace model and load state dict
    model = PoolFormerForImageClassification(config)
    model.load_state_dict(state_dict)
    model.eval()

    # Define image processor
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct)
    pixel_values = image_processor(images=prepare_img(), return_tensors="pt").pixel_values

    # forward pass
    outputs = model(pixel_values)
    logits = outputs.logits

    # define expected logit slices for different models
    if size == "s12":
        expected_slice = torch.tensor([-0.3045, -0.6758, -0.4869])
    elif size == "s24":
        expected_slice = torch.tensor([0.4402, -0.1374, -0.8045])
    elif size == "s36":
        expected_slice = torch.tensor([-0.6080, -0.5133, -0.5898])
    elif size == "m36":
        expected_slice = torch.tensor([0.3952, 0.2263, -1.2668])
    elif size == "m48":
        expected_slice = torch.tensor([0.1167, -0.0656, -0.3423])
    else:
        raise ValueError(f"Size {size} not supported")

    # verify logits
    assert logits.shape == expected_shape
    assert torch.allclose(logits[0, :3], expected_slice, atol=1e-2)

    # finally, save model and image processor
    logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--model_name""",
default="""poolformer_s12""",
type=str,
help="""Name of the model you'd like to convert.""",
)
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, help="""Path to the original PyTorch checkpoint (.pth file)."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
    args = parser.parse_args()
convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
"""simple docstring"""
from itertools import product
def total_frequency_distribution(sides_number: int, dice_number: int) -> list[int]:
    max_face_number = sides_number
    max_total = max_face_number * dice_number
    totals_frequencies = [0] * (max_total + 1)

    min_face_number = 1
    face_numbers = range(min_face_number, max_face_number + 1)
    for dice_numbers in product(face_numbers, repeat=dice_number):
        total = sum(dice_numbers)
        totals_frequencies[total] += 1

    return totals_frequencies


def solution() -> float:
    peter_totals_frequencies = total_frequency_distribution(sides_number=4, dice_number=9)
    colin_totals_frequencies = total_frequency_distribution(sides_number=6, dice_number=6)

    peter_wins_count = 0
    min_peter_total = 9
    max_peter_total = 4 * 9
    min_colin_total = 6
    for peter_total in range(min_peter_total, max_peter_total + 1):
        peter_wins_count += peter_totals_frequencies[peter_total] * sum(
            colin_totals_frequencies[min_colin_total:peter_total]
        )

    total_games_number = (4**9) * (6**6)
    peter_win_probability = peter_wins_count / total_games_number
    rounded_peter_win_probability = round(peter_win_probability, ndigits=7)

    return rounded_peter_win_probability
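
# Small sanity check (editor's addition): with two four-sided dice,
# total_frequency_distribution(4, 2)[5] == 4, counting 1+4, 2+3, 3+2 and 4+1.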
if __name__ == "__main__":
print(F"""{solution() = }""")
"""simple docstring"""
import torch
from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde
from .test_schedulers import SchedulerCommonTest
@require_torchsde
class DPMSolverSDESchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DPMSolverSDEScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "noise_sampler_seed": 0,
        }

        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.47821044921875) < 1e-2
            assert abs(result_mean.item() - 0.2178705964565277) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59352111816406) < 1e-2
            assert abs(result_mean.item() - 0.22342906892299652) < 1e-3
        else:
            assert abs(result_sum.item() - 162.52383422851562) < 1e-2
            assert abs(result_mean.item() - 0.211619570851326) < 1e-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 124.77149200439453) < 1e-2
            assert abs(result_mean.item() - 0.16226289014816284) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 128.1663360595703) < 1e-2
            assert abs(result_mean.item() - 0.16688326001167297) < 1e-3
        else:
            assert abs(result_sum.item() - 119.8487548828125) < 1e-2
            assert abs(result_mean.item() - 0.1560530662536621) < 1e-3

    def test_full_loop_device(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.46957397460938) < 1e-2
            assert abs(result_mean.item() - 0.21805934607982635) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59353637695312) < 1e-2
            assert abs(result_mean.item() - 0.22342908382415771) < 1e-3
        else:
            assert abs(result_sum.item() - 162.52383422851562) < 1e-2
            assert abs(result_mean.item() - 0.211619570851326) < 1e-3

    def test_full_loop_device_karras_sigmas(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config, use_karras_sigmas=True)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 176.66974135742188) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 177.63653564453125) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
        else:
            assert abs(result_sum.item() - 170.3135223388672) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
"""simple docstring"""
import io
import json
import fsspec
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_json_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_json_keep_in_memory(keep_in_memory, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = JsonDatasetReader(jsonl_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_json_dataset(dataset, expected_features)
@pytest.mark.parametrize(
'''features''', [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
], )
def test_dataset_from_json_features(features, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader(jsonl_path, features=features, cache_dir=cache_dir).read()
    _check_json_dataset(dataset, expected_features)
@pytest.mark.parametrize(
'''features''', [
None,
{'''col_3''': '''float64''', '''col_1''': '''string''', '''col_2''': '''int64'''},
], )
def test_dataset_from_json_with_unsorted_column_names(features, jsonl_312_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_3": "float64", "col_1": "string", "col_2": "int64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader(jsonl_312_path, features=features, cache_dir=cache_dir).read()
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 2
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_3", "col_1", "col_2"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


def test_dataset_from_json_with_mismatched_features(jsonl_312_path, tmp_path):
    # jsonl_312_path features are {"col_3": "float64", "col_1": "string", "col_2": "int64"}
    features = {"col_2": "int64", "col_3": "float64", "col_1": "string"}
    expected_features = features.copy()
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    cache_dir = tmp_path / "cache"
    dataset = JsonDatasetReader(jsonl_312_path, features=features, cache_dir=cache_dir).read()
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 2
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_2", "col_3", "col_1"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_dataset_from_json_split(split, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = JsonDatasetReader(jsonl_path, cache_dir=cache_dir, split=split).read()
    _check_json_dataset(dataset, expected_features)
    assert dataset.split == split if split else "train"


@pytest.mark.parametrize("path_type", [str, list])
def test_dataset_from_json_path_type(path_type, jsonl_path, tmp_path):
    if issubclass(path_type, str):
        path = jsonl_path
    elif issubclass(path_type, list):
        path = [jsonl_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = JsonDatasetReader(path, cache_dir=cache_dir).read()
    _check_json_dataset(dataset, expected_features)


def _check_json_datasetdict(dataset_dict, expected_features, splits=("train",)):
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 3
        assert dataset.column_names == ["col_1", "col_2", "col_3"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''', [False, True] )
def test_datasetdict_from_json_keep_in_memory(keep_in_memory, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = JsonDatasetReader({"train": jsonl_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_json_datasetdict(dataset, expected_features)
@pytest.mark.parametrize(
'''features''', [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
], )
def test_datasetdict_from_json_features(features, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader({"train": jsonl_path}, features=features, cache_dir=cache_dir).read()
    _check_json_datasetdict(dataset, expected_features)
@pytest.mark.parametrize('''split''', [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def test_datasetdict_from_json_splits(split, jsonl_path, tmp_path):
    if split:
        path = {split: jsonl_path}
    else:
        split = "train"
        path = {"train": jsonl_path, "test": jsonl_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = JsonDatasetReader(path, cache_dir=cache_dir).read()
    _check_json_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())
def lowerCamelCase__ ( __snake_case ) -> Tuple:
"""simple docstring"""
return json.load(__snake_case )
def lowerCamelCase__ ( __snake_case ) -> Optional[int]:
"""simple docstring"""
    return [json.loads(line ) for line in buffer]
class _UpperCAmelCase:
@pytest.mark.parametrize('''lines, load_json_function''' , [(True, load_json_lines), (False, load_json)])
def UpperCAmelCase ( self , __a , __a , __a) -> Dict:
'''simple docstring'''
with io.BytesIO() as buffer:
JsonDatasetWriter(__a , __a , lines=__a).write()
buffer.seek(0)
_UpperCamelCase = load_json_function(__a)
assert isinstance(__a , __a)
assert isinstance(exported_content[0] , __a)
assert len(__a) == 10
@pytest.mark.parametrize(
'''orient, container, keys, len_at''' , [
('''records''', list, {'''tokens''', '''labels''', '''answers''', '''id'''}, None),
('''split''', dict, {'''columns''', '''data'''}, '''data'''),
('''index''', dict, set('''0123456789'''), None),
('''columns''', dict, {'''tokens''', '''labels''', '''answers''', '''id'''}, '''tokens'''),
('''values''', list, None, None),
('''table''', dict, {'''schema''', '''data'''}, '''data'''),
] , )
def UpperCAmelCase ( self , __a , __a , __a , __a , __a) -> List[Any]:
'''simple docstring'''
with io.BytesIO() as buffer:
JsonDatasetWriter(__a , __a , lines=__a , orient=__a).write()
buffer.seek(0)
_UpperCamelCase = load_json(__a)
assert isinstance(__a , __a)
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
            assert not hasattr(exported_content , '''keys''') and not hasattr(exported_content[0] , '''keys''')
if len_at:
assert len(exported_content[len_at]) == 10
else:
assert len(__a) == 10
@pytest.mark.parametrize('''lines, load_json_function''' , [(True, load_json_lines), (False, load_json)])
def UpperCAmelCase ( self , __a , __a , __a) -> int:
'''simple docstring'''
with io.BytesIO() as buffer:
JsonDatasetWriter(__a , __a , lines=__a , num_proc=2).write()
buffer.seek(0)
_UpperCamelCase = load_json_function(__a)
assert isinstance(__a , __a)
assert isinstance(exported_content[0] , __a)
assert len(__a) == 10
@pytest.mark.parametrize(
'''orient, container, keys, len_at''' , [
('''records''', list, {'''tokens''', '''labels''', '''answers''', '''id'''}, None),
('''split''', dict, {'''columns''', '''data'''}, '''data'''),
('''index''', dict, set('''0123456789'''), None),
('''columns''', dict, {'''tokens''', '''labels''', '''answers''', '''id'''}, '''tokens'''),
('''values''', list, None, None),
('''table''', dict, {'''schema''', '''data'''}, '''data'''),
] , )
def UpperCAmelCase ( self , __a , __a , __a , __a , __a) -> Optional[int]:
'''simple docstring'''
with io.BytesIO() as buffer:
JsonDatasetWriter(__a , __a , lines=__a , orient=__a , num_proc=2).write()
buffer.seek(0)
_UpperCamelCase = load_json(__a)
assert isinstance(__a , __a)
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
            assert not hasattr(exported_content , '''keys''') and not hasattr(exported_content[0] , '''keys''')
if len_at:
assert len(exported_content[len_at]) == 10
else:
assert len(__a) == 10
def UpperCAmelCase ( self , __a) -> List[Any]:
'''simple docstring'''
with pytest.raises(__a):
with io.BytesIO() as buffer:
JsonDatasetWriter(__a , __a , num_proc=0)
@pytest.mark.parametrize('''compression, extension''' , [('''gzip''', '''gz'''), ('''bz2''', '''bz2'''), ('''xz''', '''xz''')])
def UpperCAmelCase ( self , __a , __a , __a , __a , __a) -> Union[str, Any]:
'''simple docstring'''
_UpperCamelCase = tmp_path_factory.mktemp('''data''') / F'''test.json.{extension}'''
_UpperCamelCase = str(shared_datadir / F'''test_file.json.{extension}''')
JsonDatasetWriter(__a , __a , compression=__a).write()
with fsspec.open(__a , '''rb''' , compression='''infer''') as f:
_UpperCamelCase = f.read()
with fsspec.open(__a , '''rb''' , compression='''infer''') as f:
_UpperCamelCase = f.read()
assert exported_content == original_content
| 19 |
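The row above round-trips datasets through JSON using the internal JsonDatasetReader/Writer test helpers. As a minimal sketch of the same behavior through the public `datasets` API (the file name and column values below are illustrative, not taken from the tests):

from datasets import Dataset, Features, Value, load_dataset

# Build a tiny in-memory dataset matching the three-column fixture used above.
ds = Dataset.from_dict({
    "col_1": ["0", "1", "2", "3"],
    "col_2": [0, 1, 2, 3],
    "col_3": [0.0, 1.0, 2.0, 3.0],
})

# lines=True writes JSON Lines (one record per line), as the lines=True cases do.
ds.to_json("train.jsonl", lines=True)

# Reading back with an explicit Features schema mirrors the `features`
# parametrization above: requested dtypes override the inferred ones.
features = Features({"col_1": Value("string"), "col_2": Value("int64"), "col_3": Value("float64")})
reloaded = load_dataset("json", data_files={"train": "train.jsonl"}, features=features)["train"]
assert reloaded.num_rows == 4
assert reloaded.column_names == ["col_1", "col_2", "col_3"]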
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_batched,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
_a = logging.get_logger(__name__)
class _UpperCAmelCase( lowerCamelCase ):
lowercase__ = ['pixel_values']
def __init__( self , __a = True , __a = None , __a = PILImageResampling.BICUBIC , __a = True , __a = True , __a = 1 / 2_55 , __a = None , __a = True , __a = None , __a = None , **__a , ) -> None:
'''simple docstring'''
super().__init__(**__a)
_UpperCamelCase = size if size is not None else {'''height''': 2_24, '''width''': 2_24}
_UpperCamelCase = get_size_dict(__a)
_UpperCamelCase = crop_size if crop_size is not None else {'''height''': 2_24, '''width''': 2_24}
_UpperCamelCase = get_size_dict(__a , default_to_square=__a , param_name='''crop_size''')
_UpperCamelCase = do_resize
_UpperCamelCase = do_rescale
_UpperCamelCase = do_normalize
_UpperCamelCase = do_center_crop
_UpperCamelCase = crop_size
_UpperCamelCase = size
_UpperCamelCase = resample
_UpperCamelCase = rescale_factor
_UpperCamelCase = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
_UpperCamelCase = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def UpperCAmelCase ( self , __a , __a , __a = PILImageResampling.BILINEAR , __a = None , **__a , ) -> np.ndarray:
'''simple docstring'''
_UpperCamelCase = get_size_dict(__a)
if "shortest_edge" in size:
_UpperCamelCase = get_resize_output_image_size(__a , size=size['''shortest_edge'''] , default_to_square=__a)
# size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"])
elif "height" in size and "width" in size:
_UpperCamelCase = (size['''height'''], size['''width'''])
else:
raise ValueError(F'''Size must contain \'height\' and \'width\' keys or \'shortest_edge\' key. Got {size.keys()}''')
return resize(__a , size=__a , resample=__a , data_format=__a , **__a)
def UpperCAmelCase ( self , __a , __a , __a = None , **__a , ) -> np.ndarray:
'''simple docstring'''
_UpperCamelCase = get_size_dict(__a)
if "height" not in size or "width" not in size:
raise ValueError(F'''The `size` parameter must contain the keys (height, width). Got {size.keys()}''')
return center_crop(__a , size=(size['''height'''], size['''width''']) , data_format=__a , **__a)
def UpperCAmelCase ( self , __a , __a , __a = None , **__a) -> np.ndarray:
'''simple docstring'''
return rescale(__a , scale=__a , data_format=__a , **__a)
def UpperCAmelCase ( self , __a , __a , __a , __a = None , **__a , ) -> np.ndarray:
'''simple docstring'''
return normalize(__a , mean=__a , std=__a , data_format=__a , **__a)
def UpperCAmelCase ( self , __a , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , __a = ChannelDimension.FIRST , **__a , ) -> BatchFeature:
'''simple docstring'''
_UpperCamelCase = do_resize if do_resize is not None else self.do_resize
_UpperCamelCase = do_rescale if do_rescale is not None else self.do_rescale
_UpperCamelCase = do_normalize if do_normalize is not None else self.do_normalize
_UpperCamelCase = do_center_crop if do_center_crop is not None else self.do_center_crop
_UpperCamelCase = crop_size if crop_size is not None else self.crop_size
_UpperCamelCase = get_size_dict(__a , param_name='''crop_size''' , default_to_square=__a)
_UpperCamelCase = resample if resample is not None else self.resample
_UpperCamelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
_UpperCamelCase = image_mean if image_mean is not None else self.image_mean
_UpperCamelCase = image_std if image_std is not None else self.image_std
_UpperCamelCase = size if size is not None else self.size
_UpperCamelCase = get_size_dict(__a)
if not is_batched(__a):
_UpperCamelCase = [images]
if not valid_images(__a):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''')
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''')
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''')
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''')
# All transformations expect numpy arrays.
_UpperCamelCase = [to_numpy_array(__a) for image in images]
if do_resize:
_UpperCamelCase = [self.resize(image=__a , size=__a , resample=__a) for image in images]
if do_center_crop:
_UpperCamelCase = [self.center_crop(image=__a , size=__a) for image in images]
if do_rescale:
_UpperCamelCase = [self.rescale(image=__a , scale=__a) for image in images]
if do_normalize:
_UpperCamelCase = [self.normalize(image=__a , mean=__a , std=__a) for image in images]
_UpperCamelCase = [to_channel_dimension_format(__a , __a) for image in images]
_UpperCamelCase = {'''pixel_values''': images}
return BatchFeature(data=__a , tensor_type=__a)
| 19 | 1 |
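The preprocessing order in the row above is resize, center-crop, rescale, normalize, then channels-first. A minimal NumPy/PIL sketch of that sequence, independent of the processor class (the 224x224 sizes and ImageNet statistics mirror the defaults above; the helper name is mine):

import numpy as np
from PIL import Image

IMAGENET_DEFAULT_MEAN = np.array([0.485, 0.456, 0.406])
IMAGENET_DEFAULT_STD = np.array([0.229, 0.224, 0.225])

def preprocess(image: Image.Image, size: int = 224, crop: int = 224) -> np.ndarray:
    # 1. resize (bicubic, as in the default resample above)
    image = image.convert("RGB").resize((size, size), Image.BICUBIC)
    arr = np.asarray(image, dtype=np.float32)
    # 2. center crop to (crop, crop)
    h, w = arr.shape[:2]
    top, left = (h - crop) // 2, (w - crop) // 2
    arr = arr[top : top + crop, left : left + crop]
    # 3. rescale pixel values from [0, 255] to [0, 1]
    arr = arr / 255.0
    # 4. per-channel normalization
    arr = (arr - IMAGENET_DEFAULT_MEAN) / IMAGENET_DEFAULT_STD
    # 5. HWC -> CHW, i.e. ChannelDimension.FIRST
    return arr.transpose(2, 0, 1).astype(np.float32)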
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_a = logging.get_logger(__name__)
_a = {
"""BridgeTower/bridgetower-base""": """https://huggingface.co/BridgeTower/bridgetower-base/blob/main/config.json""",
"""BridgeTower/bridgetower-base-itm-mlm""": (
"""https://huggingface.co/BridgeTower/bridgetower-base-itm-mlm/blob/main/config.json"""
),
}
class _UpperCAmelCase( lowerCamelCase ):
lowercase__ = 'bridgetower_vision_model'
def __init__( self , __a=7_68 , __a=12 , __a=3 , __a=16 , __a=2_88 , __a=1 , __a=1e-05 , __a=False , __a=True , __a=False , **__a , ) -> Union[str, Any]:
'''simple docstring'''
super().__init__(**__a)
_UpperCamelCase = hidden_size
_UpperCamelCase = num_hidden_layers
_UpperCamelCase = num_channels
_UpperCamelCase = patch_size
_UpperCamelCase = image_size
_UpperCamelCase = initializer_factor
_UpperCamelCase = layer_norm_eps
_UpperCamelCase = stop_gradient
_UpperCamelCase = share_layernorm
_UpperCamelCase = remove_last_layer
@classmethod
def UpperCAmelCase ( cls , __a , **__a) -> "PretrainedConfig":
'''simple docstring'''
_UpperCamelCase , _UpperCamelCase = cls.get_config_dict(__a , **__a)
if config_dict.get('''model_type''') == "bridgetower":
            _UpperCamelCase = config_dict['''vision_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''') and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''')
return cls.from_dict(__a , **__a)
class _UpperCAmelCase( lowerCamelCase ):
lowercase__ = 'bridgetower_text_model'
def __init__( self , __a=5_02_65 , __a=7_68 , __a=12 , __a=12 , __a=1 , __a=30_72 , __a="gelu" , __a=0.1 , __a=0.1 , __a=5_14 , __a=1 , __a=1e-05 , __a=1 , __a=0 , __a=2 , __a="absolute" , __a=True , **__a , ) -> Any:
'''simple docstring'''
super().__init__(**__a)
_UpperCamelCase = vocab_size
_UpperCamelCase = hidden_size
_UpperCamelCase = num_hidden_layers
_UpperCamelCase = num_attention_heads
_UpperCamelCase = hidden_act
_UpperCamelCase = initializer_factor
_UpperCamelCase = intermediate_size
_UpperCamelCase = hidden_dropout_prob
_UpperCamelCase = attention_probs_dropout_prob
_UpperCamelCase = max_position_embeddings
_UpperCamelCase = type_vocab_size
_UpperCamelCase = layer_norm_eps
_UpperCamelCase = position_embedding_type
_UpperCamelCase = use_cache
_UpperCamelCase = pad_token_id
_UpperCamelCase = bos_token_id
_UpperCamelCase = eos_token_id
@classmethod
def UpperCAmelCase ( cls , __a , **__a) -> "PretrainedConfig":
'''simple docstring'''
_UpperCamelCase , _UpperCamelCase = cls.get_config_dict(__a , **__a)
if config_dict.get('''model_type''') == "bridgetower":
_UpperCamelCase = config_dict['''text_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''') and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''')
return cls.from_dict(__a , **__a)
class _UpperCAmelCase( lowerCamelCase ):
lowercase__ = 'bridgetower'
def __init__( self , __a=True , __a="gelu" , __a=7_68 , __a=1 , __a=1e-05 , __a=False , __a="add" , __a=12 , __a=6 , __a=False , __a=False , __a=None , __a=None , **__a , ) -> Optional[int]:
'''simple docstring'''
# TODO: remove this once the Hub files are updated.
_UpperCamelCase = kwargs.pop('''text_config_dict''' , __a)
_UpperCamelCase = kwargs.pop('''vision_config_dict''' , __a)
super().__init__(**__a)
_UpperCamelCase = share_cross_modal_transformer_layers
_UpperCamelCase = hidden_act
_UpperCamelCase = hidden_size
_UpperCamelCase = initializer_factor
_UpperCamelCase = layer_norm_eps
_UpperCamelCase = share_link_tower_layers
_UpperCamelCase = link_tower_type
_UpperCamelCase = num_attention_heads
_UpperCamelCase = num_hidden_layers
_UpperCamelCase = tie_word_embeddings
_UpperCamelCase = init_layernorm_from_vision_encoder
if text_config is None:
_UpperCamelCase = {}
logger.info('''`text_config` is `None`. Initializing the `BridgeTowerTextConfig` with default values.''')
if vision_config is None:
_UpperCamelCase = {}
logger.info('''`vision_config` is `None`. Initializing the `BridgeTowerVisionConfig` with default values.''')
_UpperCamelCase = BridgeTowerTextConfig(**__a)
_UpperCamelCase = BridgeTowerVisionConfig(**__a)
@classmethod
def UpperCAmelCase ( cls , __a , __a , **__a) -> str:
'''simple docstring'''
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **__a)
def UpperCAmelCase ( self) -> Any:
'''simple docstring'''
_UpperCamelCase = copy.deepcopy(self.__dict__)
_UpperCamelCase = self.text_config.to_dict()
_UpperCamelCase = self.vision_config.to_dict()
_UpperCamelCase = self.__class__.model_type
return output
| 19 |
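The composite-config pattern in the row above (a parent config owning text and vision sub-configs, serialized by replacing each sub-config with its dict) can be sketched with plain dataclasses; the names and defaults below are illustrative:

from dataclasses import asdict, dataclass, field

@dataclass
class TextConfig:
    vocab_size: int = 50_265
    hidden_size: int = 768

@dataclass
class VisionConfig:
    hidden_size: int = 768
    image_size: int = 288

@dataclass
class ComposedConfig:
    text_config: TextConfig = field(default_factory=TextConfig)
    vision_config: VisionConfig = field(default_factory=VisionConfig)

    def to_dict(self) -> dict:
        # Like the to_dict above: copy the attribute dict, replace sub-configs
        # with plain dicts, and record the model type for round-tripping.
        out = asdict(self)  # recursively converts the nested dataclasses
        out["model_type"] = "composed"
        return out

cfg = ComposedConfig()
assert cfg.to_dict()["text_config"]["vocab_size"] == 50_265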
"""simple docstring"""
# Imports
import numpy as np
class _UpperCAmelCase:
def __init__( self , __a=None , __a=None , __a=None , __a=None , __a=None) -> Dict:
'''simple docstring'''
self.set_matricies(red=__a , green=__a , blue=__a , red_edge=__a , nir=__a)
def UpperCAmelCase ( self , __a=None , __a=None , __a=None , __a=None , __a=None) -> Dict:
'''simple docstring'''
if red is not None:
_UpperCamelCase = red
if green is not None:
_UpperCamelCase = green
if blue is not None:
_UpperCamelCase = blue
if red_edge is not None:
_UpperCamelCase = red_edge
if nir is not None:
_UpperCamelCase = nir
return True
def UpperCAmelCase ( self , __a="" , __a=None , __a=None , __a=None , __a=None , __a=None) -> List[str]:
'''simple docstring'''
self.set_matricies(red=__a , green=__a , blue=__a , red_edge=__a , nir=__a)
_UpperCamelCase = {
'''ARVI2''': self.arvaa,
'''CCCI''': self.ccci,
'''CVI''': self.cvi,
'''GLI''': self.gli,
'''NDVI''': self.ndvi,
'''BNDVI''': self.bndvi,
'''redEdgeNDVI''': self.red_edge_ndvi,
'''GNDVI''': self.gndvi,
'''GBNDVI''': self.gbndvi,
'''GRNDVI''': self.grndvi,
'''RBNDVI''': self.rbndvi,
'''PNDVI''': self.pndvi,
'''ATSAVI''': self.atsavi,
'''BWDRVI''': self.bwdrvi,
'''CIgreen''': self.ci_green,
'''CIrededge''': self.ci_rededge,
'''CI''': self.ci,
'''CTVI''': self.ctvi,
'''GDVI''': self.gdvi,
'''EVI''': self.evi,
'''GEMI''': self.gemi,
'''GOSAVI''': self.gosavi,
'''GSAVI''': self.gsavi,
'''Hue''': self.hue,
'''IVI''': self.ivi,
'''IPVI''': self.ipvi,
'''I''': self.i,
'''RVI''': self.rvi,
'''MRVI''': self.mrvi,
'''MSAVI''': self.m_savi,
'''NormG''': self.norm_g,
'''NormNIR''': self.norm_nir,
'''NormR''': self.norm_r,
'''NGRDI''': self.ngrdi,
'''RI''': self.ri,
'''S''': self.s,
'''IF''': self._if,
'''DVI''': self.dvi,
'''TVI''': self.tvi,
'''NDRE''': self.ndre,
}
try:
            return funcs[__a]()
except KeyError:
print('''Index not in the list!''')
return False
def UpperCAmelCase ( self) -> List[Any]:
'''simple docstring'''
return -0.18 + (1.17 * ((self.nir - self.red) / (self.nir + self.red)))
def UpperCAmelCase ( self) -> Any:
'''simple docstring'''
return ((self.nir - self.redEdge) / (self.nir + self.redEdge)) / (
(self.nir - self.red) / (self.nir + self.red)
)
def UpperCAmelCase ( self) -> Optional[int]:
'''simple docstring'''
return self.nir * (self.red / (self.green**2))
def UpperCAmelCase ( self) -> str:
'''simple docstring'''
return (2 * self.green - self.red - self.blue) / (
2 * self.green + self.red + self.blue
)
def UpperCAmelCase ( self) -> List[str]:
'''simple docstring'''
return (self.nir - self.red) / (self.nir + self.red)
def UpperCAmelCase ( self) -> str:
'''simple docstring'''
return (self.nir - self.blue) / (self.nir + self.blue)
def UpperCAmelCase ( self) -> List[Any]:
'''simple docstring'''
return (self.redEdge - self.red) / (self.redEdge + self.red)
def UpperCAmelCase ( self) -> Optional[int]:
'''simple docstring'''
return (self.nir - self.green) / (self.nir + self.green)
def UpperCAmelCase ( self) -> Optional[Any]:
'''simple docstring'''
return (self.nir - (self.green + self.blue)) / (
self.nir + (self.green + self.blue)
)
def UpperCAmelCase ( self) -> Tuple:
'''simple docstring'''
return (self.nir - (self.green + self.red)) / (
self.nir + (self.green + self.red)
)
def UpperCAmelCase ( self) -> List[Any]:
'''simple docstring'''
return (self.nir - (self.blue + self.red)) / (self.nir + (self.blue + self.red))
def UpperCAmelCase ( self) -> List[str]:
'''simple docstring'''
return (self.nir - (self.green + self.red + self.blue)) / (
self.nir + (self.green + self.red + self.blue)
)
def UpperCAmelCase ( self , __a=0.08 , __a=1.22 , __a=0.03) -> Optional[Any]:
'''simple docstring'''
return a * (
(self.nir - a * self.red - b)
/ (a * self.nir + self.red - a * b + x * (1 + a**2))
)
def UpperCAmelCase ( self) -> Dict:
'''simple docstring'''
return (0.1 * self.nir - self.blue) / (0.1 * self.nir + self.blue)
def UpperCAmelCase ( self) -> List[str]:
'''simple docstring'''
return (self.nir / self.green) - 1
def UpperCAmelCase ( self) -> List[Any]:
'''simple docstring'''
return (self.nir / self.redEdge) - 1
def UpperCAmelCase ( self) -> Union[str, Any]:
'''simple docstring'''
return (self.red - self.blue) / self.red
def UpperCAmelCase ( self) -> Optional[int]:
'''simple docstring'''
_UpperCamelCase = self.ndvi()
return ((ndvi + 0.5) / (abs(ndvi + 0.5))) * (abs(ndvi + 0.5) ** (1 / 2))
def UpperCAmelCase ( self) -> Optional[int]:
'''simple docstring'''
return self.nir - self.green
def UpperCAmelCase ( self) -> List[str]:
'''simple docstring'''
return 2.5 * (
(self.nir - self.red) / (self.nir + 6 * self.red - 7.5 * self.blue + 1)
)
def UpperCAmelCase ( self) -> Tuple:
'''simple docstring'''
_UpperCamelCase = (2 * (self.nir**2 - self.red**2) + 1.5 * self.nir + 0.5 * self.red) / (
self.nir + self.red + 0.5
)
return n * (1 - 0.25 * n) - (self.red - 0.125) / (1 - self.red)
def UpperCAmelCase ( self , __a=0.16) -> Optional[Any]:
'''simple docstring'''
return (self.nir - self.green) / (self.nir + self.green + y)
def UpperCAmelCase ( self , __a=0.5) -> Dict:
'''simple docstring'''
return ((self.nir - self.green) / (self.nir + self.green + n)) * (1 + n)
def UpperCAmelCase ( self) -> Dict:
'''simple docstring'''
return np.arctan(
((2 * self.red - self.green - self.blue) / 30.5) * (self.green - self.blue))
def UpperCAmelCase ( self , __a=None , __a=None) -> Any:
'''simple docstring'''
return (self.nir - b) / (a * self.red)
def UpperCAmelCase ( self) -> Optional[Any]:
'''simple docstring'''
return (self.nir / ((self.nir + self.red) / 2)) * (self.ndvi() + 1)
def UpperCAmelCase ( self) -> Optional[Any]:
'''simple docstring'''
return (self.red + self.green + self.blue) / 30.5
def UpperCAmelCase ( self) -> Any:
'''simple docstring'''
return self.nir / self.red
def UpperCAmelCase ( self) -> Tuple:
'''simple docstring'''
return (self.rvi() - 1) / (self.rvi() + 1)
def UpperCAmelCase ( self) -> List[Any]:
'''simple docstring'''
return (
(2 * self.nir + 1)
- ((2 * self.nir + 1) ** 2 - 8 * (self.nir - self.red)) ** (1 / 2)
) / 2
def UpperCAmelCase ( self) -> Optional[int]:
'''simple docstring'''
return self.green / (self.nir + self.red + self.green)
def UpperCAmelCase ( self) -> str:
'''simple docstring'''
return self.nir / (self.nir + self.red + self.green)
def UpperCAmelCase ( self) -> Optional[int]:
'''simple docstring'''
return self.red / (self.nir + self.red + self.green)
def UpperCAmelCase ( self) -> Tuple:
'''simple docstring'''
return (self.green - self.red) / (self.green + self.red)
def UpperCAmelCase ( self) -> Dict:
'''simple docstring'''
return (self.red - self.green) / (self.red + self.green)
def UpperCAmelCase ( self) -> Any:
'''simple docstring'''
_UpperCamelCase = np.max([np.max(self.red), np.max(self.green), np.max(self.blue)])
_UpperCamelCase = np.min([np.min(self.red), np.min(self.green), np.min(self.blue)])
return (max_value - min_value) / max_value
def UpperCAmelCase ( self) -> str:
'''simple docstring'''
return (2 * self.red - self.green - self.blue) / (self.green - self.blue)
def UpperCAmelCase ( self) -> int:
'''simple docstring'''
return self.nir / self.red
def UpperCAmelCase ( self) -> Any:
'''simple docstring'''
return (self.ndvi() + 0.5) ** (1 / 2)
def UpperCAmelCase ( self) -> Union[str, Any]:
'''simple docstring'''
return (self.nir - self.redEdge) / (self.nir + self.redEdge)
| 19 | 1 |
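Every index in the row above is an arithmetic combination of spectral bands; NDVI is the canonical case. A self-contained NumPy version with a division-by-zero guard (the guard is my addition, not present in the class above):

import numpy as np

def ndvi(nir: np.ndarray, red: np.ndarray) -> np.ndarray:
    # Normalized Difference Vegetation Index: (NIR - Red) / (NIR + Red).
    nir = nir.astype(np.float64)
    red = red.astype(np.float64)
    denom = nir + red
    safe = np.where(denom == 0.0, 1.0, denom)  # avoid 0/0 on empty pixels
    return np.where(denom == 0.0, 0.0, (nir - red) / safe)

nir = np.array([[0.8, 0.6], [0.7, 0.0]])
red = np.array([[0.1, 0.2], [0.3, 0.0]])
print(ndvi(nir, red))  # values lie in [-1, 1]; higher means denser vegetation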
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import PIL
import torch
from torchvision import transforms
from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput
from diffusers.schedulers import DDIMScheduler
from diffusers.utils import randn_tensor
_a = transforms.Compose(
[
transforms.Resize((256, 256)),
transforms.ToTensor(),
transforms.Normalize([0.5], [0.5]),
]
)
def lowerCamelCase__ ( __snake_case ) -> Optional[Any]:
"""simple docstring"""
if isinstance(__snake_case, torch.Tensor ):
return image
elif isinstance(__snake_case, PIL.Image.Image ):
_UpperCamelCase = [image]
        _UpperCamelCase = [_a(img.convert('''RGB''' ) ) for img in image]
_UpperCamelCase = torch.stack(__snake_case )
return image
class _UpperCAmelCase( lowerCamelCase ):
def __init__( self , __a , __a) -> Dict:
'''simple docstring'''
super().__init__()
# make sure scheduler can always be converted to DDIM
_UpperCamelCase = DDIMScheduler.from_config(scheduler.config)
self.register_modules(unet=__a , scheduler=__a)
def UpperCAmelCase ( self , __a) -> Tuple:
'''simple docstring'''
if strength < 0 or strength > 1:
raise ValueError(F'''The value of strength should in [0.0, 1.0] but is {strength}''')
def UpperCAmelCase ( self , __a , __a , __a) -> Dict:
'''simple docstring'''
# get the original timestep using init_timestep
_UpperCamelCase = min(int(num_inference_steps * strength) , __a)
_UpperCamelCase = max(num_inference_steps - init_timestep , 0)
_UpperCamelCase = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a=None) -> int:
'''simple docstring'''
if not isinstance(__a , (torch.Tensor, PIL.Image.Image, list)):
raise ValueError(
F'''`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(__a)}''')
_UpperCamelCase = image.to(device=__a , dtype=__a)
if isinstance(__a , __a) and len(__a) != batch_size:
raise ValueError(
F'''You have passed a list of generators of length {len(__a)}, but requested an effective batch'''
F''' size of {batch_size}. Make sure the batch size matches the length of the generators.''')
_UpperCamelCase = init_latents.shape
_UpperCamelCase = randn_tensor(__a , generator=__a , device=__a , dtype=__a)
# get latents
print('''add noise to latents at timestep''' , __a)
_UpperCamelCase = self.scheduler.add_noise(__a , __a , __a)
_UpperCamelCase = init_latents
return latents
@torch.no_grad()
def __call__( self , __a = None , __a = 0.8 , __a = 1 , __a = None , __a = 0.0 , __a = 50 , __a = None , __a = "pil" , __a = True , ) -> Union[ImagePipelineOutput, Tuple]:
'''simple docstring'''
self.check_inputs(__a)
# 2. Preprocess image
_UpperCamelCase = preprocess(__a)
# 3. set timesteps
self.scheduler.set_timesteps(__a , device=self.device)
_UpperCamelCase , _UpperCamelCase = self.get_timesteps(__a , __a , self.device)
_UpperCamelCase = timesteps[:1].repeat(__a)
# 4. Prepare latent variables
_UpperCamelCase = self.prepare_latents(__a , __a , __a , self.unet.dtype , self.device , __a)
_UpperCamelCase = latents
# 5. Denoising loop
for t in self.progress_bar(__a):
# 1. predict noise model_output
_UpperCamelCase = self.unet(__a , __a).sample
# 2. predict previous mean of image x_t-1 and add variance depending on eta
# eta corresponds to η in paper and should be between [0, 1]
# do x_t -> x_t-1
_UpperCamelCase = self.scheduler.step(
__a , __a , __a , eta=__a , use_clipped_model_output=__a , generator=__a , ).prev_sample
_UpperCamelCase = (image / 2 + 0.5).clamp(0 , 1)
_UpperCamelCase = image.cpu().permute(0 , 2 , 3 , 1).numpy()
if output_type == "pil":
_UpperCamelCase = self.numpy_to_pil(__a)
if not return_dict:
return (image, latent_timestep.item())
return ImagePipelineOutput(images=__a)
| 19 |
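The key img2img detail above is how `strength` truncates the scheduler's timestep list in `get_timesteps`: strength 1.0 keeps the full schedule, small strengths keep only its tail. A standalone sketch of that arithmetic (the schedule values are illustrative, not what a real scheduler produces):

def get_timesteps(num_inference_steps: int, strength: float, timesteps: list) -> tuple:
    # strength in [0, 1] is the fraction of the diffusion schedule actually run.
    init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
    t_start = max(num_inference_steps - init_timestep, 0)
    return timesteps[t_start:], num_inference_steps - t_start

schedule = list(range(980, -1, -20))  # a 50-entry DDIM-style schedule
kept, n = get_timesteps(50, 0.8, schedule)
assert n == 40 and len(kept) == 40 and kept[0] == schedule[10]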
"""simple docstring"""
import inspect
import unittest
from transformers import ViTHybridConfig
from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel
from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class _UpperCAmelCase:
def __init__( self , __a , __a=13 , __a=64 , __a=2 , __a=3 , __a=True , __a=True , __a=32 , __a=5 , __a=4 , __a=37 , __a="gelu" , __a=0.1 , __a=0.1 , __a=10 , __a=0.02 , __a=[1, 16, 4, 4] , __a=None , ) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase = parent
_UpperCamelCase = batch_size
_UpperCamelCase = image_size
_UpperCamelCase = patch_size
_UpperCamelCase = num_channels
_UpperCamelCase = is_training
_UpperCamelCase = use_labels
_UpperCamelCase = hidden_size
_UpperCamelCase = num_hidden_layers
_UpperCamelCase = num_attention_heads
_UpperCamelCase = intermediate_size
_UpperCamelCase = hidden_act
_UpperCamelCase = hidden_dropout_prob
_UpperCamelCase = attention_probs_dropout_prob
_UpperCamelCase = type_sequence_label_size
_UpperCamelCase = initializer_range
_UpperCamelCase = scope
_UpperCamelCase = backbone_featmap_shape
# in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
# the number of patches is based on the feature map of the backbone, which by default uses an output stride
# of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size
_UpperCamelCase = (self.image_size // 32) ** 2
_UpperCamelCase = num_patches + 1
def UpperCAmelCase ( self) -> int:
'''simple docstring'''
_UpperCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
_UpperCamelCase = None
if self.use_labels:
_UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size)
_UpperCamelCase = self.get_config()
return config, pixel_values, labels
def UpperCAmelCase ( self) -> Optional[int]:
'''simple docstring'''
_UpperCamelCase = {
'''global_padding''': '''same''',
'''layer_type''': '''bottleneck''',
'''depths''': [3, 4, 9],
'''out_features''': ['''stage1''', '''stage2''', '''stage3'''],
'''embedding_dynamic_padding''': True,
'''hidden_sizes''': [4, 8, 16, 32],
'''num_groups''': 2,
}
return ViTHybridConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__a , initializer_range=self.initializer_range , backbone_featmap_shape=self.backbone_featmap_shape , backbone_config=__a , )
def UpperCAmelCase ( self , __a , __a , __a) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase = ViTHybridModel(config=__a)
model.to(__a)
model.eval()
_UpperCamelCase = model(__a)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def UpperCAmelCase ( self , __a , __a , __a) -> Union[str, Any]:
'''simple docstring'''
_UpperCamelCase = self.type_sequence_label_size
_UpperCamelCase = ViTHybridForImageClassification(__a)
model.to(__a)
model.eval()
_UpperCamelCase = model(__a , labels=__a)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
def UpperCAmelCase ( self) -> List[Any]:
'''simple docstring'''
_UpperCamelCase = self.prepare_config_and_inputs()
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = config_and_inputs
_UpperCamelCase = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class _UpperCAmelCase( lowerCamelCase , lowerCamelCase , unittest.TestCase ):
lowercase__ = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else ()
lowercase__ = (
{'feature-extraction': ViTHybridModel, 'image-classification': ViTHybridForImageClassification}
if is_torch_available()
else {}
)
lowercase__ = False
lowercase__ = False
lowercase__ = False
def UpperCAmelCase ( self) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase = ViTHybridModelTester(self)
_UpperCamelCase = ConfigTester(self , config_class=__a , has_text_modality=__a , hidden_size=37)
def UpperCAmelCase ( self) -> Any:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='''ViT does not use inputs_embeds''')
def UpperCAmelCase ( self) -> Dict:
'''simple docstring'''
pass
def UpperCAmelCase ( self) -> Optional[int]:
'''simple docstring'''
_UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCamelCase = model_class(__a)
self.assertIsInstance(model.get_input_embeddings() , (nn.Module))
_UpperCamelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__a , nn.Linear))
def UpperCAmelCase ( self) -> Tuple:
'''simple docstring'''
_UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCamelCase = model_class(__a)
_UpperCamelCase = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCamelCase = [*signature.parameters.keys()]
_UpperCamelCase = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , __a)
def UpperCAmelCase ( self) -> str:
'''simple docstring'''
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a)
def UpperCAmelCase ( self) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__a)
def UpperCAmelCase ( self) -> int:
'''simple docstring'''
_UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCamelCase = _config_zero_init(__a)
for model_class in self.all_model_classes:
_UpperCamelCase = model_class(config=__a)
# Skip the check for the backbone
for name, module in model.named_modules():
if module.__class__.__name__ == "ViTHybridPatchEmbeddings":
_UpperCamelCase = [F'''{name}.{key}''' for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@slow
def UpperCAmelCase ( self) -> Optional[Any]:
'''simple docstring'''
for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCamelCase = ViTHybridModel.from_pretrained(__a)
self.assertIsNotNone(__a)
def lowerCamelCase__ ( ) -> Tuple:
"""simple docstring"""
_UpperCamelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class _UpperCAmelCase( unittest.TestCase ):
@cached_property
def UpperCAmelCase ( self) -> Tuple:
'''simple docstring'''
return (
ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0])
if is_vision_available()
else None
)
@slow
def UpperCAmelCase ( self) -> Dict:
'''simple docstring'''
_UpperCamelCase = ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(
__a)
_UpperCamelCase = self.default_image_processor
_UpperCamelCase = prepare_img()
_UpperCamelCase = image_processor(images=__a , return_tensors='''pt''').to(__a)
# forward pass
with torch.no_grad():
_UpperCamelCase = model(**__a)
# verify the logits
_UpperCamelCase = torch.Size((1, 10_00))
self.assertEqual(outputs.logits.shape , __a)
_UpperCamelCase = torch.tensor([-1.9090, -0.4993, -0.2389]).to(__a)
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __a , atol=1e-4))
@slow
@require_accelerate
def UpperCAmelCase ( self) -> Any:
'''simple docstring'''
_UpperCamelCase = ViTHybridImageProcessor.from_pretrained('''google/vit-hybrid-base-bit-384''')
_UpperCamelCase = ViTHybridForImageClassification.from_pretrained('''google/vit-hybrid-base-bit-384''' , device_map='''auto''')
_UpperCamelCase = prepare_img()
_UpperCamelCase = image_processor(images=__a , return_tensors='''pt''')
_UpperCamelCase = model(**__a)
_UpperCamelCase = outputs.logits
# model predicts one of the 1000 ImageNet classes
_UpperCamelCase = logits.argmax(-1).item()
        self.assertEqual(model.config.id2label[predicted_class_idx] , '''tabby, tabby cat''')
| 19 | 1 |
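The forward-signature check in the row above relies on `inspect.signature` preserving argument order. A small standalone illustration of that idiom (the sample `forward` below is a stand-in, not a real model method):

import inspect

def forward(self, pixel_values, head_mask=None, output_attentions=None):
    ...

# signature.parameters is an ordered mapping, so the argument order is
# deterministic -- the property the common-attributes test depends on.
arg_names = [name for name in inspect.signature(forward).parameters if name != "self"]
assert arg_names[:1] == ["pixel_values"]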
"""simple docstring"""
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
Pipeline,
ZeroShotClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
_a = {"""LayoutLMv2Config""", """LayoutLMv3Config"""}
@is_pipeline_test
class _UpperCAmelCase( unittest.TestCase ):
lowercase__ = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
lowercase__ = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if model_mapping is not None:
lowercase__ = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
if tf_model_mapping is not None:
lowercase__ = {
config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
}
def UpperCAmelCase ( self , __a , __a , __a) -> str:
'''simple docstring'''
_UpperCamelCase = ZeroShotClassificationPipeline(
            model=__a , tokenizer=__a , candidate_labels=['''politics''', '''health'''])
return classifier, ["Who are you voting for in 2020?", "My stomach hurts."]
def UpperCAmelCase ( self , __a , __a) -> Optional[int]:
'''simple docstring'''
_UpperCamelCase = classifier('''Who are you voting for in 2020?''' , candidate_labels='''politics''')
self.assertEqual(__a , {'''sequence''': ANY(__a), '''labels''': [ANY(__a)], '''scores''': [ANY(__a)]})
# No kwarg
_UpperCamelCase = classifier('''Who are you voting for in 2020?''' , ['''politics'''])
self.assertEqual(__a , {'''sequence''': ANY(__a), '''labels''': [ANY(__a)], '''scores''': [ANY(__a)]})
_UpperCamelCase = classifier('''Who are you voting for in 2020?''' , candidate_labels=['''politics'''])
self.assertEqual(__a , {'''sequence''': ANY(__a), '''labels''': [ANY(__a)], '''scores''': [ANY(__a)]})
_UpperCamelCase = classifier('''Who are you voting for in 2020?''' , candidate_labels='''politics, public health''')
self.assertEqual(
__a , {'''sequence''': ANY(__a), '''labels''': [ANY(__a), ANY(__a)], '''scores''': [ANY(__a), ANY(__a)]})
self.assertAlmostEqual(sum(nested_simplify(outputs['''scores'''])) , 1.0)
_UpperCamelCase = classifier('''Who are you voting for in 2020?''' , candidate_labels=['''politics''', '''public health'''])
self.assertEqual(
__a , {'''sequence''': ANY(__a), '''labels''': [ANY(__a), ANY(__a)], '''scores''': [ANY(__a), ANY(__a)]})
self.assertAlmostEqual(sum(nested_simplify(outputs['''scores'''])) , 1.0)
_UpperCamelCase = classifier(
'''Who are you voting for in 2020?''' , candidate_labels='''politics''' , hypothesis_template='''This text is about {}''')
self.assertEqual(__a , {'''sequence''': ANY(__a), '''labels''': [ANY(__a)], '''scores''': [ANY(__a)]})
# https://github.com/huggingface/transformers/issues/13846
_UpperCamelCase = classifier(['''I am happy'''] , ['''positive''', '''negative'''])
self.assertEqual(
__a , [
{'''sequence''': ANY(__a), '''labels''': [ANY(__a), ANY(__a)], '''scores''': [ANY(__a), ANY(__a)]}
for i in range(1)
] , )
_UpperCamelCase = classifier(['''I am happy''', '''I am sad'''] , ['''positive''', '''negative'''])
self.assertEqual(
__a , [
{'''sequence''': ANY(__a), '''labels''': [ANY(__a), ANY(__a)], '''scores''': [ANY(__a), ANY(__a)]}
for i in range(2)
] , )
with self.assertRaises(__a):
classifier('''''' , candidate_labels='''politics''')
with self.assertRaises(__a):
classifier(__a , candidate_labels='''politics''')
with self.assertRaises(__a):
classifier('''Who are you voting for in 2020?''' , candidate_labels='''''')
with self.assertRaises(__a):
classifier('''Who are you voting for in 2020?''' , candidate_labels=__a)
with self.assertRaises(__a):
classifier(
'''Who are you voting for in 2020?''' , candidate_labels='''politics''' , hypothesis_template='''Not formatting template''' , )
with self.assertRaises(__a):
classifier(
'''Who are you voting for in 2020?''' , candidate_labels='''politics''' , hypothesis_template=__a , )
self.run_entailment_id(__a)
def UpperCAmelCase ( self , __a) -> List[Any]:
'''simple docstring'''
_UpperCamelCase = zero_shot_classifier.model.config
_UpperCamelCase = config.labelaid
_UpperCamelCase = zero_shot_classifier.entailment_id
_UpperCamelCase = {'''LABEL_0''': 0, '''LABEL_1''': 1, '''LABEL_2''': 2}
self.assertEqual(zero_shot_classifier.entailment_id , -1)
_UpperCamelCase = {'''entailment''': 0, '''neutral''': 1, '''contradiction''': 2}
self.assertEqual(zero_shot_classifier.entailment_id , 0)
_UpperCamelCase = {'''ENTAIL''': 0, '''NON-ENTAIL''': 1}
self.assertEqual(zero_shot_classifier.entailment_id , 0)
_UpperCamelCase = {'''ENTAIL''': 2, '''NEUTRAL''': 1, '''CONTR''': 0}
self.assertEqual(zero_shot_classifier.entailment_id , 2)
_UpperCamelCase = original_labelaid
self.assertEqual(__a , zero_shot_classifier.entailment_id)
@require_torch
def UpperCAmelCase ( self) -> Optional[int]:
'''simple docstring'''
_UpperCamelCase = pipeline(
'''zero-shot-classification''' , model='''sshleifer/tiny-distilbert-base-cased-distilled-squad''' , framework='''pt''' , )
# There was a regression in 4.10 for this
# Adding a test so we don't make the mistake again.
# https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499
zero_shot_classifier(
'''Who are you voting for in 2020?''' * 1_00 , candidate_labels=['''politics''', '''public health''', '''science'''])
@require_torch
def UpperCAmelCase ( self) -> str:
'''simple docstring'''
_UpperCamelCase = pipeline(
'''zero-shot-classification''' , model='''sshleifer/tiny-distilbert-base-cased-distilled-squad''' , framework='''pt''' , )
_UpperCamelCase = zero_shot_classifier(
'''Who are you voting for in 2020?''' , candidate_labels=['''politics''', '''public health''', '''science'''])
self.assertEqual(
nested_simplify(__a) , {
'''sequence''': '''Who are you voting for in 2020?''',
'''labels''': ['''science''', '''public health''', '''politics'''],
'''scores''': [0.333, 0.333, 0.333],
} , )
@require_tf
def UpperCAmelCase ( self) -> Dict:
'''simple docstring'''
_UpperCamelCase = pipeline(
'''zero-shot-classification''' , model='''sshleifer/tiny-distilbert-base-cased-distilled-squad''' , framework='''tf''' , )
_UpperCamelCase = zero_shot_classifier(
'''Who are you voting for in 2020?''' , candidate_labels=['''politics''', '''public health''', '''science'''])
self.assertEqual(
nested_simplify(__a) , {
'''sequence''': '''Who are you voting for in 2020?''',
'''labels''': ['''science''', '''public health''', '''politics'''],
'''scores''': [0.333, 0.333, 0.333],
} , )
@slow
@require_torch
def UpperCAmelCase ( self) -> Any:
'''simple docstring'''
_UpperCamelCase = pipeline('''zero-shot-classification''' , model='''roberta-large-mnli''' , framework='''pt''')
_UpperCamelCase = zero_shot_classifier(
'''Who are you voting for in 2020?''' , candidate_labels=['''politics''', '''public health''', '''science'''])
self.assertEqual(
nested_simplify(__a) , {
'''sequence''': '''Who are you voting for in 2020?''',
'''labels''': ['''politics''', '''public health''', '''science'''],
'''scores''': [0.976, 0.015, 0.009],
} , )
_UpperCamelCase = zero_shot_classifier(
'''The dominant sequence transduction models are based on complex recurrent or convolutional neural networks'''
''' in an encoder-decoder configuration. The best performing models also connect the encoder and decoder'''
''' through an attention mechanism. We propose a new simple network architecture, the Transformer, based'''
''' solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two'''
''' machine translation tasks show these models to be superior in quality while being more parallelizable'''
''' and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014'''
''' English-to-German translation task, improving over the existing best results, including ensembles by'''
''' over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new'''
''' single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small'''
''' fraction of the training costs of the best models from the literature. We show that the Transformer'''
''' generalizes well to other tasks by applying it successfully to English constituency parsing both with'''
''' large and limited training data.''' , candidate_labels=['''machine learning''', '''statistics''', '''translation''', '''vision'''] , multi_label=__a , )
self.assertEqual(
nested_simplify(__a) , {
'''sequence''': (
'''The dominant sequence transduction models are based on complex recurrent or convolutional neural'''
''' networks in an encoder-decoder configuration. The best performing models also connect the'''
''' encoder and decoder through an attention mechanism. We propose a new simple network'''
''' architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence'''
''' and convolutions entirely. Experiments on two machine translation tasks show these models to be'''
''' superior in quality while being more parallelizable and requiring significantly less time to'''
''' train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,'''
''' improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014'''
''' English-to-French translation task, our model establishes a new single-model state-of-the-art'''
''' BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training'''
''' costs of the best models from the literature. We show that the Transformer generalizes well to'''
''' other tasks by applying it successfully to English constituency parsing both with large and'''
''' limited training data.'''
),
'''labels''': ['''translation''', '''machine learning''', '''vision''', '''statistics'''],
'''scores''': [0.817, 0.713, 0.018, 0.018],
} , )
@slow
@require_tf
def UpperCAmelCase ( self) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase = pipeline('''zero-shot-classification''' , model='''roberta-large-mnli''' , framework='''tf''')
_UpperCamelCase = zero_shot_classifier(
'''Who are you voting for in 2020?''' , candidate_labels=['''politics''', '''public health''', '''science'''])
self.assertEqual(
nested_simplify(__a) , {
'''sequence''': '''Who are you voting for in 2020?''',
'''labels''': ['''politics''', '''public health''', '''science'''],
'''scores''': [0.976, 0.015, 0.009],
} , )
_UpperCamelCase = zero_shot_classifier(
'''The dominant sequence transduction models are based on complex recurrent or convolutional neural networks'''
''' in an encoder-decoder configuration. The best performing models also connect the encoder and decoder'''
''' through an attention mechanism. We propose a new simple network architecture, the Transformer, based'''
''' solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two'''
''' machine translation tasks show these models to be superior in quality while being more parallelizable'''
''' and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014'''
''' English-to-German translation task, improving over the existing best results, including ensembles by'''
''' over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new'''
''' single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small'''
''' fraction of the training costs of the best models from the literature. We show that the Transformer'''
''' generalizes well to other tasks by applying it successfully to English constituency parsing both with'''
''' large and limited training data.''' , candidate_labels=['''machine learning''', '''statistics''', '''translation''', '''vision'''] , multi_label=__a , )
self.assertEqual(
nested_simplify(__a) , {
'''sequence''': (
'''The dominant sequence transduction models are based on complex recurrent or convolutional neural'''
''' networks in an encoder-decoder configuration. The best performing models also connect the'''
''' encoder and decoder through an attention mechanism. We propose a new simple network'''
''' architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence'''
''' and convolutions entirely. Experiments on two machine translation tasks show these models to be'''
''' superior in quality while being more parallelizable and requiring significantly less time to'''
''' train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,'''
''' improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014'''
''' English-to-French translation task, our model establishes a new single-model state-of-the-art'''
''' BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training'''
''' costs of the best models from the literature. We show that the Transformer generalizes well to'''
''' other tasks by applying it successfully to English constituency parsing both with large and'''
''' limited training data.'''
),
'''labels''': ['''translation''', '''machine learning''', '''vision''', '''statistics'''],
'''scores''': [0.817, 0.713, 0.018, 0.018],
} , )
| 19 |
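For context on what the pipeline tests above exercise: zero-shot classification casts each candidate label into an NLI hypothesis via `hypothesis_template` and ranks labels by entailment probability. A minimal usage sketch with the public `pipeline` API (the model matches the slow tests above and must be downloaded to run this):

from transformers import pipeline

classifier = pipeline("zero-shot-classification", model="roberta-large-mnli")
result = classifier(
    "Who are you voting for in 2020?",
    candidate_labels=["politics", "public health", "science"],
    hypothesis_template="This example is about {}.",
)
# With multi_label=False (the default) the scores are softmaxed and sum to 1.
print(result["labels"][0], result["scores"][0])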
"""simple docstring"""
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNetaDConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class _UpperCAmelCase( lowerCamelCase ):
lowercase__ = ['vqvae']
def __init__( self , __a , __a , __a , __a , ) -> List[str]:
'''simple docstring'''
super().__init__()
self.register_modules(unet=__a , scheduler=__a , mel=__a , vqvae=__a)
def UpperCAmelCase ( self) -> int:
'''simple docstring'''
        return 50 if isinstance(self.scheduler , DDIMScheduler) else 10_00
@torch.no_grad()
def __call__( self , __a = 1 , __a = None , __a = None , __a = 0 , __a = 0 , __a = None , __a = None , __a = 0 , __a = 0 , __a = None , __a = 0 , __a = None , __a = None , __a=True , ) -> Union[
Union[AudioPipelineOutput, ImagePipelineOutput],
Tuple[List[Image.Image], Tuple[int, List[np.ndarray]]],
]:
'''simple docstring'''
_UpperCamelCase = steps or self.get_default_steps()
self.scheduler.set_timesteps(__a)
_UpperCamelCase = step_generator or generator
# For backwards compatibility
if type(self.unet.config.sample_size) == int:
_UpperCamelCase = (self.unet.config.sample_size, self.unet.config.sample_size)
if noise is None:
_UpperCamelCase = randn_tensor(
(
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size[0],
self.unet.config.sample_size[1],
) , generator=__a , device=self.device , )
_UpperCamelCase = noise
_UpperCamelCase = None
if audio_file is not None or raw_audio is not None:
self.mel.load_audio(__a , __a)
_UpperCamelCase = self.mel.audio_slice_to_image(__a)
_UpperCamelCase = np.frombuffer(input_image.tobytes() , dtype='''uint8''').reshape(
(input_image.height, input_image.width))
_UpperCamelCase = (input_image / 2_55) * 2 - 1
_UpperCamelCase = torch.tensor(input_image[np.newaxis, :, :] , dtype=torch.float).to(self.device)
if self.vqvae is not None:
_UpperCamelCase = self.vqvae.encode(torch.unsqueeze(__a , 0)).latent_dist.sample(
generator=__a)[0]
_UpperCamelCase = self.vqvae.config.scaling_factor * input_images
if start_step > 0:
_UpperCamelCase = self.scheduler.add_noise(__a , __a , self.scheduler.timesteps[start_step - 1])
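            # Convert the mask boundaries from seconds to spectrogram columns:
            # each column spans hop_length audio samples at the mel sample rate,
            # so pixels_per_second = width * sample_rate / x_res / hop_length.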
_UpperCamelCase = (
self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
)
_UpperCamelCase = int(mask_start_secs * pixels_per_second)
_UpperCamelCase = int(mask_end_secs * pixels_per_second)
_UpperCamelCase = self.scheduler.add_noise(__a , __a , torch.tensor(self.scheduler.timesteps[start_step:]))
for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:])):
if isinstance(self.unet , __a):
_UpperCamelCase = self.unet(__a , __a , __a)['''sample''']
else:
_UpperCamelCase = self.unet(__a , __a)['''sample''']
if isinstance(self.scheduler , __a):
_UpperCamelCase = self.scheduler.step(
model_output=__a , timestep=__a , sample=__a , eta=__a , generator=__a , )['''prev_sample''']
else:
_UpperCamelCase = self.scheduler.step(
model_output=__a , timestep=__a , sample=__a , generator=__a , )['''prev_sample''']
if mask is not None:
if mask_start > 0:
_UpperCamelCase = mask[:, step, :, :mask_start]
if mask_end > 0:
_UpperCamelCase = mask[:, step, :, -mask_end:]
if self.vqvae is not None:
# 0.18215 was scaling factor used in training to ensure unit variance
_UpperCamelCase = 1 / self.vqvae.config.scaling_factor * images
_UpperCamelCase = self.vqvae.decode(__a)['''sample''']
_UpperCamelCase = (images / 2 + 0.5).clamp(0 , 1)
_UpperCamelCase = images.cpu().permute(0 , 2 , 3 , 1).numpy()
_UpperCamelCase = (images * 2_55).round().astype('''uint8''')
_UpperCamelCase = list(
(Image.fromarray(_[:, :, 0]) for _ in images)
if images.shape[3] == 1
            else (Image.fromarray(_ , mode='''RGB''').convert('''L''') for _ in images))
_UpperCamelCase = [self.mel.image_to_audio(__a) for _ in images]
if not return_dict:
return images, (self.mel.get_sample_rate(), audios)
return BaseOutput(**AudioPipelineOutput(np.array(__a)[:, np.newaxis, :]) , **ImagePipelineOutput(__a))
@torch.no_grad()
def UpperCAmelCase ( self , __a , __a = 50) -> np.ndarray:
'''simple docstring'''
assert isinstance(self.scheduler , __a)
self.scheduler.set_timesteps(__a)
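        # The loop below runs the DDIM update in reverse (image -> noise): at each
        # step the predicted noise is removed at the current alpha level and the
        # sample is re-noised at the next one, recovering latents that reproduce
        # the input image when run forward again.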
_UpperCamelCase = np.array(
[np.frombuffer(image.tobytes() , dtype='''uint8''').reshape((1, image.height, image.width)) for image in images])
_UpperCamelCase = (sample / 2_55) * 2 - 1
_UpperCamelCase = torch.Tensor(__a).to(self.device)
for t in self.progress_bar(torch.flip(self.scheduler.timesteps , (0,))):
_UpperCamelCase = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
_UpperCamelCase = self.scheduler.alphas_cumprod[t]
_UpperCamelCase = (
self.scheduler.alphas_cumprod[prev_timestep]
if prev_timestep >= 0
else self.scheduler.final_alpha_cumprod
)
_UpperCamelCase = 1 - alpha_prod_t
_UpperCamelCase = self.unet(__a , __a)['''sample''']
_UpperCamelCase = (1 - alpha_prod_t_prev) ** 0.5 * model_output
_UpperCamelCase = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
_UpperCamelCase = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output
return sample
@staticmethod
def UpperCAmelCase ( __a , __a , __a) -> torch.Tensor:
'''simple docstring'''
_UpperCamelCase = acos(torch.dot(torch.flatten(__a) , torch.flatten(__a)) / torch.norm(__a) / torch.norm(__a))
return sin((1 - alpha) * theta) * xa / sin(__a) + sin(alpha * theta) * xa / sin(__a)
| 19 | 1 |
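The static method closing the row above is spherical linear interpolation (slerp) between two noise tensors. The same formula in plain NumPy, with a quick unit-circle check (the clip is a numerical-safety addition of mine):

import numpy as np

def slerp(x0: np.ndarray, x1: np.ndarray, alpha: float) -> np.ndarray:
    # theta is the angle between the flattened tensors; interpolating along
    # the great circle instead of the straight chord preserves the norm.
    cos_theta = np.dot(x0.ravel(), x1.ravel()) / (np.linalg.norm(x0) * np.linalg.norm(x1))
    theta = np.arccos(np.clip(cos_theta, -1.0, 1.0))
    return (np.sin((1 - alpha) * theta) * x0 + np.sin(alpha * theta) * x1) / np.sin(theta)

a, b = np.array([1.0, 0.0]), np.array([0.0, 1.0])
mid = slerp(a, b, 0.5)
assert np.allclose(np.linalg.norm(mid), 1.0)  # stays on the unit circle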
"""simple docstring"""
import argparse
import datetime
def lowerCamelCase__ ( __snake_case ) -> str:
"""simple docstring"""
_UpperCamelCase = {
'''0''': '''Sunday''',
'''1''': '''Monday''',
'''2''': '''Tuesday''',
'''3''': '''Wednesday''',
'''4''': '''Thursday''',
'''5''': '''Friday''',
'''6''': '''Saturday''',
}
_UpperCamelCase = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 0}
# Validate
if not 0 < len(__snake_case ) < 11:
raise ValueError('''Must be 10 characters long''' )
# Get month
_UpperCamelCase = int(date_input[0] + date_input[1] )
# Validate
if not 0 < m < 13:
raise ValueError('''Month must be between 1 - 12''' )
_UpperCamelCase = date_input[2]
# Validate
if sep_a not in ["-", "/"]:
raise ValueError('''Date separator must be \'-\' or \'/\'''' )
# Get day
_UpperCamelCase = int(date_input[3] + date_input[4] )
# Validate
if not 0 < d < 32:
raise ValueError('''Date must be between 1 - 31''' )
# Get second separator
_UpperCamelCase = date_input[5]
# Validate
if sep_a not in ["-", "/"]:
raise ValueError('''Date separator must be \'-\' or \'/\'''' )
# Get year
_UpperCamelCase = int(date_input[6] + date_input[7] + date_input[8] + date_input[9] )
# Arbitrary year range
if not 45 < y < 85_00:
raise ValueError(
'''Year out of range. There has to be some sort of limit...right?''' )
# Get datetime obj for validation
_UpperCamelCase = datetime.date(int(__snake_case ), int(__snake_case ), int(__snake_case ) )
# Start math
if m <= 2:
_UpperCamelCase = y - 1
_UpperCamelCase = m + 12
# maths var
_UpperCamelCase = int(str(__snake_case )[:2] )
_UpperCamelCase = int(str(__snake_case )[2:] )
_UpperCamelCase = int(2.6 * m - 5.39 )
_UpperCamelCase = int(c / 4 )
_UpperCamelCase = int(k / 4 )
_UpperCamelCase = int(d + k )
_UpperCamelCase = int(t + u + v + x )
_UpperCamelCase = int(z - (2 * c) )
_UpperCamelCase = round(w % 7 )
# End math
# Validate math
if f != convert_datetime_days[dt_ck.weekday()]:
raise AssertionError('''The date was evaluated incorrectly. Contact developer.''' )
# Response
_UpperCamelCase = F'''Your date {date_input}, is a {days[str(__snake_case )]}!'''
return response
if __name__ == "__main__":
import doctest
doctest.testmod()
_a = argparse.ArgumentParser(
description=(
"""Find out what day of the week nearly any date is or was. Enter """
"""date as a string in the mm-dd-yyyy or mm/dd/yyyy format"""
)
)
parser.add_argument(
"""date_input""", type=str, help="""Date as a string (mm-dd-yyyy or mm/dd/yyyy)"""
)
_a = parser.parse_args()
zeller(args.date_input)
| 19 |
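Since the Zeller implementation above already builds a `datetime.date` for validation, the whole weekday computation can be cross-checked against the standard library; a compact sketch (the helper name is mine):

import datetime

def weekday_name(mm_dd_yyyy: str) -> str:
    # Parse the same mm-dd-yyyy / mm/dd/yyyy layout, then let the standard
    # library do the calendar arithmetic instead of Zeller's congruence.
    m, d, y = int(mm_dd_yyyy[0:2]), int(mm_dd_yyyy[3:5]), int(mm_dd_yyyy[6:10])
    return datetime.date(y, m, d).strftime("%A")

assert weekday_name("01-31-2010") == "Sunday"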
"""simple docstring"""
import copy
from collections import OrderedDict
from typing import Dict, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
_a = logging.get_logger(__name__)
_a = {
"""facebook/detr-resnet-50""": """https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json""",
# See all DETR models at https://huggingface.co/models?filter=detr
}
class DetrConfig( PretrainedConfig ):
    model_type = 'detr'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {
        'hidden_size': 'd_model',
        'num_attention_heads': 'encoder_attention_heads',
    }
    def __init__( self , use_timm_backbone=True , backbone_config=None , num_channels=3 , num_queries=1_00 , encoder_layers=6 , encoder_ffn_dim=20_48 , encoder_attention_heads=8 , decoder_layers=6 , decoder_ffn_dim=20_48 , decoder_attention_heads=8 , encoder_layerdrop=0.0 , decoder_layerdrop=0.0 , is_encoder_decoder=True , activation_function="relu" , d_model=2_56 , dropout=0.1 , attention_dropout=0.0 , activation_dropout=0.0 , init_std=0.02 , init_xavier_std=1.0 , auxiliary_loss=False , position_embedding_type="sine" , backbone="resnet50" , use_pretrained_backbone=True , dilation=False , class_cost=1 , bbox_cost=5 , giou_cost=2 , mask_loss_coefficient=1 , dice_loss_coefficient=1 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , eos_coefficient=0.1 , **kwargs , ) -> int:
        '''simple docstring'''
        if backbone_config is not None and use_timm_backbone:
            raise ValueError('''You can\'t specify both `backbone_config` and `use_timm_backbone`.''')
        if not use_timm_backbone:
            if backbone_config is None:
                logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''')
                backbone_config = CONFIG_MAPPING['''resnet'''](out_features=['''stage4'''])
            elif isinstance(backbone_config , dict):
                backbone_model_type = backbone_config.get('''model_type''')
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
            # set timm attributes to None
            dilation , backbone , use_pretrained_backbone = None, None, None
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder , **kwargs)
    @property
    def num_attention_heads( self) -> int:
        '''simple docstring'''
        return self.encoder_attention_heads
    @property
    def hidden_size( self) -> int:
        '''simple docstring'''
        return self.d_model
    @classmethod
    def from_backbone_config( cls , backbone_config , **kwargs) -> int:
        '''simple docstring'''
        return cls(backbone_config=backbone_config , **kwargs)
    def to_dict( self) -> Dict[str, any]:
        '''simple docstring'''
        output = copy.deepcopy(self.__dict__)
        if output["backbone_config"] is not None:
            output['''backbone_config'''] = self.backbone_config.to_dict()
        output['''model_type'''] = self.__class__.model_type
        return output
class DetrOnnxConfig( OnnxConfig ):
    torch_onnx_minimum_version = version.parse('1.11' )
    @property
    def inputs( self) -> Mapping[str, Mapping[int, str]]:
        '''simple docstring'''
        return OrderedDict(
            [
                ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
                ('''pixel_mask''', {0: '''batch'''}),
            ])
    @property
    def atol_for_validation( self) -> float:
        '''simple docstring'''
        return 1e-5
    @property
    def default_onnx_opset( self) -> int:
        '''simple docstring'''
        return 12
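# Hedged usage sketch (added for illustration; not part of the original module). The
# attribute_map above makes `hidden_size` an alias for `d_model`:
#
#   config = DetrConfig(d_model=128, encoder_layers=3)
#   assert config.hidden_size == 128        # resolved through attribute_map
#   onnx_config = DetrOnnxConfig(config)
#   print(list(onnx_config.inputs))         # ['pixel_values', 'pixel_mask']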
| 19 | 1 |
"""simple docstring"""
import argparse
import torch
from transformers import (
EncodecConfig,
EncodecFeatureExtractor,
EncodecModel,
logging,
)
# checkpoints downloaded from:
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th
# https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th
logging.set_verbosity_info()
logger = logging.get_logger("""transformers.models.encodec""")
MAPPING_QUANTIZER = {
"""quantizer.vq.layers.*._codebook.inited""": """quantizer.layers.*.codebook.inited""",
"""quantizer.vq.layers.*._codebook.cluster_size""": """quantizer.layers.*.codebook.cluster_size""",
"""quantizer.vq.layers.*._codebook.embed""": """quantizer.layers.*.codebook.embed""",
"""quantizer.vq.layers.*._codebook.embed_avg""": """quantizer.layers.*.codebook.embed_avg""",
}
MAPPING_ENCODER = {
"""encoder.model.0.conv.conv""": """encoder.layers.0.conv""",
"""encoder.model.1.block.1.conv.conv""": """encoder.layers.1.block.1.conv""",
"""encoder.model.1.block.3.conv.conv""": """encoder.layers.1.block.3.conv""",
"""encoder.model.1.shortcut.conv.conv""": """encoder.layers.1.shortcut.conv""",
"""encoder.model.3.conv.conv""": """encoder.layers.3.conv""",
"""encoder.model.4.block.1.conv.conv""": """encoder.layers.4.block.1.conv""",
"""encoder.model.4.block.3.conv.conv""": """encoder.layers.4.block.3.conv""",
"""encoder.model.4.shortcut.conv.conv""": """encoder.layers.4.shortcut.conv""",
"""encoder.model.6.conv.conv""": """encoder.layers.6.conv""",
"""encoder.model.7.block.1.conv.conv""": """encoder.layers.7.block.1.conv""",
"""encoder.model.7.block.3.conv.conv""": """encoder.layers.7.block.3.conv""",
"""encoder.model.7.shortcut.conv.conv""": """encoder.layers.7.shortcut.conv""",
"""encoder.model.9.conv.conv""": """encoder.layers.9.conv""",
"""encoder.model.10.block.1.conv.conv""": """encoder.layers.10.block.1.conv""",
"""encoder.model.10.block.3.conv.conv""": """encoder.layers.10.block.3.conv""",
"""encoder.model.10.shortcut.conv.conv""": """encoder.layers.10.shortcut.conv""",
"""encoder.model.12.conv.conv""": """encoder.layers.12.conv""",
"""encoder.model.13.lstm""": """encoder.layers.13.lstm""",
"""encoder.model.15.conv.conv""": """encoder.layers.15.conv""",
}
MAPPING_ENCODER_48K = {
"""encoder.model.0.conv.norm""": """encoder.layers.0.norm""",
"""encoder.model.1.block.1.conv.norm""": """encoder.layers.1.block.1.norm""",
"""encoder.model.1.block.3.conv.norm""": """encoder.layers.1.block.3.norm""",
"""encoder.model.1.shortcut.conv.norm""": """encoder.layers.1.shortcut.norm""",
"""encoder.model.3.conv.norm""": """encoder.layers.3.norm""",
"""encoder.model.4.block.1.conv.norm""": """encoder.layers.4.block.1.norm""",
"""encoder.model.4.block.3.conv.norm""": """encoder.layers.4.block.3.norm""",
"""encoder.model.4.shortcut.conv.norm""": """encoder.layers.4.shortcut.norm""",
"""encoder.model.6.conv.norm""": """encoder.layers.6.norm""",
"""encoder.model.7.block.1.conv.norm""": """encoder.layers.7.block.1.norm""",
"""encoder.model.7.block.3.conv.norm""": """encoder.layers.7.block.3.norm""",
"""encoder.model.7.shortcut.conv.norm""": """encoder.layers.7.shortcut.norm""",
"""encoder.model.9.conv.norm""": """encoder.layers.9.norm""",
"""encoder.model.10.block.1.conv.norm""": """encoder.layers.10.block.1.norm""",
"""encoder.model.10.block.3.conv.norm""": """encoder.layers.10.block.3.norm""",
"""encoder.model.10.shortcut.conv.norm""": """encoder.layers.10.shortcut.norm""",
"""encoder.model.12.conv.norm""": """encoder.layers.12.norm""",
"""encoder.model.15.conv.norm""": """encoder.layers.15.norm""",
}
MAPPING_DECODER = {
"""decoder.model.0.conv.conv""": """decoder.layers.0.conv""",
"""decoder.model.1.lstm""": """decoder.layers.1.lstm""",
"""decoder.model.3.convtr.convtr""": """decoder.layers.3.conv""",
"""decoder.model.4.block.1.conv.conv""": """decoder.layers.4.block.1.conv""",
"""decoder.model.4.block.3.conv.conv""": """decoder.layers.4.block.3.conv""",
"""decoder.model.4.shortcut.conv.conv""": """decoder.layers.4.shortcut.conv""",
"""decoder.model.6.convtr.convtr""": """decoder.layers.6.conv""",
"""decoder.model.7.block.1.conv.conv""": """decoder.layers.7.block.1.conv""",
"""decoder.model.7.block.3.conv.conv""": """decoder.layers.7.block.3.conv""",
"""decoder.model.7.shortcut.conv.conv""": """decoder.layers.7.shortcut.conv""",
"""decoder.model.9.convtr.convtr""": """decoder.layers.9.conv""",
"""decoder.model.10.block.1.conv.conv""": """decoder.layers.10.block.1.conv""",
"""decoder.model.10.block.3.conv.conv""": """decoder.layers.10.block.3.conv""",
"""decoder.model.10.shortcut.conv.conv""": """decoder.layers.10.shortcut.conv""",
"""decoder.model.12.convtr.convtr""": """decoder.layers.12.conv""",
"""decoder.model.13.block.1.conv.conv""": """decoder.layers.13.block.1.conv""",
"""decoder.model.13.block.3.conv.conv""": """decoder.layers.13.block.3.conv""",
"""decoder.model.13.shortcut.conv.conv""": """decoder.layers.13.shortcut.conv""",
"""decoder.model.15.conv.conv""": """decoder.layers.15.conv""",
}
MAPPING_DECODER_48K = {
"""decoder.model.0.conv.norm""": """decoder.layers.0.norm""",
"""decoder.model.3.convtr.norm""": """decoder.layers.3.norm""",
"""decoder.model.4.block.1.conv.norm""": """decoder.layers.4.block.1.norm""",
"""decoder.model.4.block.3.conv.norm""": """decoder.layers.4.block.3.norm""",
"""decoder.model.4.shortcut.conv.norm""": """decoder.layers.4.shortcut.norm""",
"""decoder.model.6.convtr.norm""": """decoder.layers.6.norm""",
"""decoder.model.7.block.1.conv.norm""": """decoder.layers.7.block.1.norm""",
"""decoder.model.7.block.3.conv.norm""": """decoder.layers.7.block.3.norm""",
"""decoder.model.7.shortcut.conv.norm""": """decoder.layers.7.shortcut.norm""",
"""decoder.model.9.convtr.norm""": """decoder.layers.9.norm""",
"""decoder.model.10.block.1.conv.norm""": """decoder.layers.10.block.1.norm""",
"""decoder.model.10.block.3.conv.norm""": """decoder.layers.10.block.3.norm""",
"""decoder.model.10.shortcut.conv.norm""": """decoder.layers.10.shortcut.norm""",
"""decoder.model.12.convtr.norm""": """decoder.layers.12.norm""",
"""decoder.model.13.block.1.conv.norm""": """decoder.layers.13.block.1.norm""",
"""decoder.model.13.block.3.conv.norm""": """decoder.layers.13.block.3.norm""",
"""decoder.model.13.shortcut.conv.norm""": """decoder.layers.13.shortcut.norm""",
"""decoder.model.15.conv.norm""": """decoder.layers.15.norm""",
}
MAPPING_24K = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_DECODER,
}
MAPPING_48K = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_ENCODER_48K,
**MAPPING_DECODER,
**MAPPING_DECODER_48K,
}
TOP_LEVEL_KEYS = []
IGNORE_KEYS = []
def set_recursively( hf_pointer , key , value , full_name , weight_type ) -> Dict:
    """simple docstring"""
    for attribute in key.split('''.''' ):
        hf_pointer = getattr(hf_pointer , attribute )
    if weight_type is not None:
        hf_shape = getattr(hf_pointer , weight_type ).shape
    else:
        hf_shape = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            F'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
            F''' {value.shape} for {full_name}''' )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    elif weight_type == "weight_ih_l0":
        hf_pointer.weight_ih_l0.data = value
    elif weight_type == "weight_hh_l0":
        hf_pointer.weight_hh_l0.data = value
    elif weight_type == "bias_ih_l0":
        hf_pointer.bias_ih_l0.data = value
    elif weight_type == "bias_hh_l0":
        hf_pointer.bias_hh_l0.data = value
    elif weight_type == "weight_ih_l1":
        hf_pointer.weight_ih_l1.data = value
    elif weight_type == "weight_hh_l1":
        hf_pointer.weight_hh_l1.data = value
    elif weight_type == "bias_ih_l1":
        hf_pointer.bias_ih_l1.data = value
    elif weight_type == "bias_hh_l1":
        hf_pointer.bias_hh_l1.data = value
    else:
        hf_pointer.data = value
    logger.info(F'''{key + ("." + weight_type if weight_type is not None else "")} was initialized from {full_name}.''' )
def should_ignore( name , ignore_keys ) -> bool:
"""simple docstring"""
for key in ignore_keys:
if key.endswith('''.*''' ):
if name.startswith(key[:-1] ):
return True
elif ".*." in key:
            prefix , suffix = key.split('''.*.''' )
if prefix in name and suffix in name:
return True
elif key in name:
return True
return False
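# Hedged illustration (added; not part of the conversion script): should_ignore
# accepts three pattern styles — a trailing ".*" prefix match, an infix ".*." match
# on both halves of the name, and a plain substring match:
#
#   assert should_ignore("encoder.model.0.conv.conv", ["encoder.*.conv"])   # infix wildcard
#   assert should_ignore("decoder.anything", ["decoder.*"])                 # prefix wildcard
#   assert not should_ignore("quantizer.vq.layers.0", ["decoder.*"])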
def recursively_load_weights( orig_dict , hf_model , model_name ) -> Dict:
    """simple docstring"""
    unused_weights = []
    if model_name in ("encodec_24khz", "encodec_32khz"):
        MAPPING = MAPPING_24K
    elif model_name == "encodec_48khz":
        MAPPING = MAPPING_48K
    else:
        raise ValueError(F'''Unsupported model: {model_name}''' )
for name, value in orig_dict.items():
        if should_ignore(name , IGNORE_KEYS ):
logger.info(F'''{name} was ignored''' )
continue
        is_used = False
for key, mapped_key in MAPPING.items():
if "*" in key:
                prefix , suffix = key.split('''.*.''' )
                if prefix in name and suffix in name:
                    key = suffix
if key in name:
# HACK otherwise .embed gets initialized with .embed_avg too
if key.endswith('''embed''' ) and name.endswith('''embed_avg''' ):
continue
                is_used = True
                if "*" in mapped_key:
                    layer_index = name.split(key )[0].split('''.''' )[-2]
                    mapped_key = mapped_key.replace('''*''', layer_index )
                if "weight_g" in name:
                    weight_type = '''weight_g'''
                elif "weight_v" in name:
                    weight_type = '''weight_v'''
                elif "weight_ih_l0" in name:
                    weight_type = '''weight_ih_l0'''
                elif "weight_hh_l0" in name:
                    weight_type = '''weight_hh_l0'''
                elif "bias_ih_l0" in name:
                    weight_type = '''bias_ih_l0'''
                elif "bias_hh_l0" in name:
                    weight_type = '''bias_hh_l0'''
                elif "weight_ih_l1" in name:
                    weight_type = '''weight_ih_l1'''
                elif "weight_hh_l1" in name:
                    weight_type = '''weight_hh_l1'''
                elif "bias_ih_l1" in name:
                    weight_type = '''bias_ih_l1'''
                elif "bias_hh_l1" in name:
                    weight_type = '''bias_hh_l1'''
                elif "bias" in name:
                    weight_type = '''bias'''
                elif "weight" in name:
                    weight_type = '''weight'''
                elif "running_mean" in name:
                    weight_type = '''running_mean'''
                elif "running_var" in name:
                    weight_type = '''running_var'''
                elif "num_batches_tracked" in name:
                    weight_type = '''num_batches_tracked'''
                else:
                    weight_type = None
                set_recursively(hf_model , mapped_key , value , name , weight_type )
continue
if not is_used:
            unused_weights.append(name )
logger.warning(F'''Unused weights: {unused_weights}''' )
@torch.no_grad()
def convert_checkpoint( model_name , checkpoint_path , pytorch_dump_folder_path , config_path=None , repo_id=None , ) -> Dict:
    """simple docstring"""
    if config_path is not None:
        config = EncodecConfig.from_pretrained(config_path )
    else:
        config = EncodecConfig()
    if model_name == "encodec_24khz":
        pass  # config is already correct
    elif model_name == "encodec_32khz":
        config.upsampling_ratios = [8, 5, 4, 4]
        config.target_bandwidths = [2.2]
        config.num_filters = 64
        config.sampling_rate = 3_20_00
        config.codebook_size = 20_48
        config.use_causal_conv = False
        config.normalize = False
        config.use_conv_shortcut = False
    elif model_name == "encodec_48khz":
        config.upsampling_ratios = [8, 5, 4, 2]
        config.target_bandwidths = [3.0, 6.0, 12.0, 24.0]
        config.sampling_rate = 4_80_00
        config.audio_channels = 2
        config.use_causal_conv = False
        config.norm_type = '''time_group_norm'''
        config.normalize = True
        config.chunk_length_s = 1.0
        config.overlap = 0.01
    else:
        raise ValueError(F'''Unknown model name: {model_name}''' )
    model = EncodecModel(config )
    feature_extractor = EncodecFeatureExtractor(
        feature_size=config.audio_channels, sampling_rate=config.sampling_rate, chunk_length_s=config.chunk_length_s, overlap=config.overlap, )
    feature_extractor.save_pretrained(pytorch_dump_folder_path )
    original_checkpoint = torch.load(checkpoint_path )
    if "best_state" in original_checkpoint:
        # we might have a training state saved, in which case discard the yaml results and just retain the weights
        original_checkpoint = original_checkpoint['''best_state''']
    recursively_load_weights(original_checkpoint , model , model_name )
    model.save_pretrained(pytorch_dump_folder_path )
    if repo_id:
        print('''Pushing to the hub...''' )
        feature_extractor.push_to_hub(repo_id )
        model.push_to_hub(repo_id )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--model""",
default="""encodec_24khz""",
type=str,
help="""The model to convert. Should be one of 'encodec_24khz', 'encodec_32khz', 'encodec_48khz'.""",
)
parser.add_argument("""--checkpoint_path""", required=True, default=None, type=str, help="""Path to original checkpoint""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, default=None, type=str, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
    args = parser.parse_args()
convert_checkpoint(
args.model,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
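# Hedged usage sketch (added for illustration; the script filename and paths below are
# placeholders, not taken from the original):
#
#   python convert_encodec_checkpoint.py \
#       --model encodec_24khz \
#       --checkpoint_path ./encodec_24khz-d7cc33bc.th \
#       --pytorch_dump_folder_path ./encodec_24khz_converted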
| 19 |
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""microsoft/wavlm-base""": """https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json""",
# See all WavLM models at https://huggingface.co/models?filter=wavlm
}
class WavLMConfig( PretrainedConfig ):
    model_type = 'wavlm'
    def __init__( self , vocab_size=32 , hidden_size=7_68 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=30_72 , hidden_act="gelu" , hidden_dropout=0.1 , activation_dropout=0.1 , attention_dropout=0.1 , feat_proj_dropout=0.0 , final_dropout=0.1 , layerdrop=0.1 , initializer_range=0.02 , layer_norm_eps=1e-5 , feat_extract_norm="group" , feat_extract_activation="gelu" , conv_dim=(5_12, 5_12, 5_12, 5_12, 5_12, 5_12, 5_12) , conv_stride=(5, 2, 2, 2, 2, 2, 2) , conv_kernel=(10, 3, 3, 3, 3, 2, 2) , conv_bias=False , num_conv_pos_embeddings=1_28 , num_conv_pos_embedding_groups=16 , num_buckets=3_20 , max_bucket_distance=8_00 , do_stable_layer_norm=False , apply_spec_augment=True , mask_time_prob=0.05 , mask_time_length=10 , mask_time_min_masks=2 , mask_feature_prob=0.0 , mask_feature_length=10 , num_codevectors_per_group=3_20 , num_codevector_groups=2 , contrastive_logits_temperature=0.1 , num_negatives=1_00 , codevector_dim=2_56 , proj_codevector_dim=2_56 , diversity_loss_weight=0.1 , ctc_loss_reduction="mean" , ctc_zero_infinity=False , use_weighted_layer_sum=False , classifier_proj_size=2_56 , tdnn_dim=(5_12, 5_12, 5_12, 5_12, 15_00) , tdnn_kernel=(5, 3, 3, 1, 1) , tdnn_dilation=(1, 2, 3, 1, 1) , xvector_output_dim=5_12 , num_ctc_classes=80 , pad_token_id=0 , bos_token_id=1 , eos_token_id=2 , add_adapter=False , adapter_kernel_size=3 , adapter_stride=2 , num_adapter_layers=3 , output_hidden_size=None , **kwargs , ) -> Union[str, Any]:
        '''simple docstring'''
        super().__init__(**kwargs , pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_buckets = num_buckets
        self.max_bucket_distance = max_bucket_distance
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.num_ctc_classes = num_ctc_classes
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size
        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                '''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='''
                ''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='''
                F''' {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,'''
                F''' `len(config.conv_kernel) = {len(self.conv_kernel)}`.''')
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight
        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity
        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size
        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim
    @property
    def inputs_to_logits_ratio( self) -> Optional[Any]:
        '''simple docstring'''
        return functools.reduce(operator.mul , self.conv_stride , 1)
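# Hedged worked example (added for illustration; not in the original module): with the
# default conv_stride of (5, 2, 2, 2, 2, 2, 2), the property above multiplies out to
# 5 * 2**6 = 320, i.e. roughly 320 raw audio samples per output frame:
#
#   config = WavLMConfig()
#   assert config.inputs_to_logits_ratio == 320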
| 19 | 1 |
"""simple docstring"""
import inspect
import tempfile
import unittest
from huggingface_hub import hf_hub_download
from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
TOLERANCE = 1E-4
if is_torch_available():
import torch
from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder
@require_torch
class AutoformerModelTester:
    def __init__( self , parent , d_model=16 , batch_size=13 , prediction_length=7 , context_length=14 , label_length=10 , cardinality=19 , embedding_dimension=5 , num_time_features=4 , is_training=True , hidden_size=16 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=4 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , lags_sequence=[1, 2, 3, 4, 5] , moving_average=25 , autocorrelation_factor=5 , ) -> Tuple:
        '''simple docstring'''
        self.d_model = d_model
        self.parent = parent
        self.batch_size = batch_size
        self.prediction_length = prediction_length
        self.context_length = context_length
        self.cardinality = cardinality
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.embedding_dimension = embedding_dimension
        self.is_training = is_training
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.encoder_seq_length = context_length
        self.decoder_seq_length = prediction_length + label_length
        self.label_length = label_length
        self.moving_average = moving_average
        self.autocorrelation_factor = autocorrelation_factor
    def get_config( self) -> Optional[Any]:
'''simple docstring'''
return AutoformerConfig(
d_model=self.d_model , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , prediction_length=self.prediction_length , context_length=self.context_length , label_length=self.label_length , lags_sequence=self.lags_sequence , num_time_features=self.num_time_features , num_static_categorical_features=1 , cardinality=[self.cardinality] , embedding_dimension=[self.embedding_dimension] , moving_average=self.moving_average , )
    def prepare_autoformer_inputs_dict( self , config) -> Tuple:
        '''simple docstring'''
        _past_length = config.context_length + max(config.lags_sequence)
        static_categorical_features = ids_tensor([self.batch_size, 1] , config.cardinality[0])
        past_time_features = floats_tensor([self.batch_size, _past_length, config.num_time_features])
        past_values = floats_tensor([self.batch_size, _past_length])
        past_observed_mask = floats_tensor([self.batch_size, _past_length]) > 0.5
        # decoder inputs
        future_time_features = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features])
        future_values = floats_tensor([self.batch_size, config.prediction_length])
_UpperCamelCase = {
'''past_values''': past_values,
'''static_categorical_features''': static_categorical_features,
'''past_time_features''': past_time_features,
'''past_observed_mask''': past_observed_mask,
'''future_time_features''': future_time_features,
'''future_values''': future_values,
}
return inputs_dict
    def prepare_config_and_inputs( self) -> int:
        '''simple docstring'''
        config = self.get_config()
        inputs_dict = self.prepare_autoformer_inputs_dict(config)
        return config, inputs_dict
    def prepare_config_and_inputs_for_common( self) -> Tuple:
        '''simple docstring'''
        config , inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def check_encoder_decoder_model_standalone( self , config , inputs_dict) -> Union[str, Any]:
        '''simple docstring'''
        model = AutoformerModel(config=config).to(torch_device).eval()
        outputs = model(**inputs_dict)
        encoder_last_hidden_state = outputs.encoder_last_hidden_state
        last_hidden_state = outputs.last_hidden_state
        with tempfile.TemporaryDirectory() as tmpdirname:
            encoder = model.get_encoder()
            encoder.save_pretrained(tmpdirname)
            encoder = AutoformerEncoder.from_pretrained(tmpdirname).to(torch_device)
        transformer_inputs , feature , _ , _ , _ = model.create_network_inputs(**inputs_dict)
        seasonal_input , trend_input = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...])
        enc_input = torch.cat(
            (transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]) , dim=-1 , )
        encoder_last_hidden_state_a = encoder(inputs_embeds=enc_input)[0]
        self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1e-3)
        mean = (
            torch.mean(transformer_inputs[:, : config.context_length, ...] , dim=1)
            .unsqueeze(1)
            .repeat(1 , config.prediction_length , 1)
        )
        zeros = torch.zeros(
            [transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]] , device=enc_input.device , )
        dec_input = torch.cat(
            (
                torch.cat((seasonal_input[:, -config.label_length :, ...], zeros) , dim=1),
                feature[:, config.context_length - config.label_length :, ...],
            ) , dim=-1 , )
        trend_init = torch.cat(
            (
                torch.cat((trend_input[:, -config.label_length :, ...], mean) , dim=1),
                feature[:, config.context_length - config.label_length :, ...],
            ) , dim=-1 , )
        with tempfile.TemporaryDirectory() as tmpdirname:
            decoder = model.get_decoder()
            decoder.save_pretrained(tmpdirname)
            decoder = AutoformerDecoder.from_pretrained(tmpdirname).to(torch_device)
        last_hidden_state_a = decoder(
            trend=trend_init , inputs_embeds=dec_input , encoder_hidden_states=encoder_last_hidden_state , )[0]
        self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1e-3)
@require_torch
class AutoformerModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
    all_generative_model_classes = (AutoformerForPrediction,) if is_torch_available() else ()
    pipeline_model_mapping = {'feature-extraction': AutoformerModel} if is_torch_available() else {}
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False
    test_torchscript = False
    test_inputs_embeds = False
    test_model_common_attributes = False
    def setUp( self) -> Dict:
        '''simple docstring'''
        self.model_tester = AutoformerModelTester(self)
        self.config_tester = ConfigTester(self , config_class=AutoformerConfig , has_text_modality=False)
    def test_config( self) -> int:
'''simple docstring'''
self.config_tester.run_common_tests()
    def test_save_load_strict( self) -> Union[str, Any]:
        '''simple docstring'''
        config , inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            model = model_class(config)
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model_a , info = model_class.from_pretrained(tmpdirname , output_loading_info=True)
            self.assertEqual(info['''missing_keys'''] , [])
    def test_encoder_decoder_model_standalone( self) -> int:
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_encoder_decoder_model_standalone(*config_and_inputs)
@unittest.skip(reason='''Model has no tokens embeddings''')
    def test_resize_tokens_embeddings( self) -> Any:
'''simple docstring'''
pass
    def test_model_main_input_name( self) -> Optional[int]:
        '''simple docstring'''
        model_signature = inspect.signature(getattr(AutoformerModel , '''forward'''))
        # The main input is the name of the argument after `self`
        observed_main_input_name = list(model_signature.parameters.keys())[1]
        self.assertEqual(AutoformerModel.main_input_name , observed_main_input_name)
    def test_forward_signature( self) -> Any:
        '''simple docstring'''
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = [
'''past_values''',
'''past_time_features''',
'''past_observed_mask''',
'''static_categorical_features''',
'''static_real_features''',
'''future_values''',
'''future_time_features''',
]
if model.__class__.__name__ in ["AutoformerForPrediction"]:
expected_arg_names.append('''future_observed_mask''')
expected_arg_names.extend(
[
'''decoder_attention_mask''',
'''head_mask''',
'''decoder_head_mask''',
'''cross_attn_head_mask''',
'''encoder_outputs''',
'''past_key_values''',
'''output_hidden_states''',
'''output_attentions''',
'''use_cache''',
'''return_dict''',
])
            self.assertListEqual(arg_names[: len(expected_arg_names)] , expected_arg_names)
    def test_attention_outputs( self) -> List[Any]:
        '''simple docstring'''
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        seq_len = getattr(self.model_tester , '''seq_length''' , None)
        decoder_seq_length = getattr(self.model_tester , '''decoder_seq_length''' , seq_len)
        encoder_seq_length = getattr(self.model_tester , '''encoder_seq_length''' , seq_len)
        d_model = getattr(self.model_tester , '''d_model''' , None)
        num_attention_heads = getattr(self.model_tester , '''num_attention_heads''' , None)
        dim = d_model // num_attention_heads
        for model_class in self.all_model_classes:
            inputs_dict['''output_attentions'''] = True
            inputs_dict['''output_hidden_states'''] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class))
            attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(attentions) , self.model_tester.num_hidden_layers)
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class))
            attentions = outputs.encoder_attentions
            self.assertEqual(len(attentions) , self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
            out_len = len(outputs)
            correct_outlen = 7
if "last_hidden_state" in outputs:
correct_outlen += 1
if "trend" in outputs:
correct_outlen += 1
if "past_key_values" in outputs:
correct_outlen += 1 # past_key_values have been returned
if "loss" in outputs:
correct_outlen += 1
if "params" in outputs:
correct_outlen += 1
            self.assertEqual(out_len , correct_outlen)
            # decoder attentions
            decoder_attentions = outputs.decoder_attentions
            self.assertIsInstance(decoder_attentions , (list, tuple))
            self.assertEqual(len(decoder_attentions) , self.model_tester.num_hidden_layers)
self.assertListEqual(
list(decoder_attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# cross attentions
            cross_attentions = outputs.cross_attentions
            self.assertIsInstance(cross_attentions , (list, tuple))
            self.assertEqual(len(cross_attentions) , self.model_tester.num_hidden_layers)
self.assertListEqual(
list(cross_attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# Check attention is always last and order is fine
            inputs_dict['''output_attentions'''] = True
            inputs_dict['''output_hidden_states'''] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class))
            self.assertEqual(out_len + 2 , len(outputs))
            self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(self_attentions) , self.model_tester.num_hidden_layers)
self.assertListEqual(
list(self_attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
@is_flaky()
    def test_retain_grad_hidden_states_attentions( self) -> Optional[Any]:
'''simple docstring'''
super().test_retain_grad_hidden_states_attentions()
def prepare_batch( filename="train-batch.pt" ) -> Tuple:
    """simple docstring"""
    file = hf_hub_download(repo_id='''hf-internal-testing/tourism-monthly-batch''', filename=filename, repo_type='''dataset''' )
    batch = torch.load(file, map_location=torch_device )
    return batch
@require_torch
@slow
class AutoformerModelIntegrationTests( unittest.TestCase ):
    def test_inference_no_head( self) -> Tuple:
        '''simple docstring'''
        model = AutoformerModel.from_pretrained('''huggingface/autoformer-tourism-monthly''').to(torch_device)
        batch = prepare_batch()
        with torch.no_grad():
            output = model(
                past_values=batch['''past_values'''] , past_time_features=batch['''past_time_features'''] , past_observed_mask=batch['''past_observed_mask'''] , static_categorical_features=batch['''static_categorical_features'''] , future_values=batch['''future_values'''] , future_time_features=batch['''future_time_features'''] , )[0]
        expected_shape = torch.Size(
            (64, model.config.prediction_length + model.config.label_length, model.config.feature_size))
        self.assertEqual(output.shape , expected_shape)
        expected_slice = torch.tensor(
            [[0.3593, -1.3398, 0.6330], [0.2279, 1.5396, -0.1792], [0.0450, 1.3225, -0.2335]] , device=torch_device)
        self.assertTrue(torch.allclose(output[0, :3, :3] , expected_slice , atol=TOLERANCE))
    def test_inference_head( self) -> Dict:
        '''simple docstring'''
        model = AutoformerForPrediction.from_pretrained('''huggingface/autoformer-tourism-monthly''').to(torch_device)
        batch = prepare_batch('''val-batch.pt''')
        with torch.no_grad():
            output = model(
                past_values=batch['''past_values'''] , past_time_features=batch['''past_time_features'''] , past_observed_mask=batch['''past_observed_mask'''] , static_categorical_features=batch['''static_categorical_features'''] , ).encoder_last_hidden_state
        expected_shape = torch.Size((64, model.config.context_length, model.config.d_model))
        self.assertEqual(output.shape , expected_shape)
        expected_slice = torch.tensor(
            [[-0.0734, -0.9036, 0.8358], [4.7186, 2.4113, 1.9581], [1.7953, 2.3558, 1.2970]] , device=torch_device)
        self.assertTrue(torch.allclose(output[0, :3, :3] , expected_slice , atol=TOLERANCE))
    def test_seq_to_seq_generation( self) -> Any:
        '''simple docstring'''
        model = AutoformerForPrediction.from_pretrained('''huggingface/autoformer-tourism-monthly''').to(torch_device)
        batch = prepare_batch('''val-batch.pt''')
        with torch.no_grad():
            outputs = model.generate(
                static_categorical_features=batch['''static_categorical_features'''] , past_time_features=batch['''past_time_features'''] , past_values=batch['''past_values'''] , future_time_features=batch['''future_time_features'''] , past_observed_mask=batch['''past_observed_mask'''] , )
        expected_shape = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length))
        self.assertEqual(outputs.sequences.shape , expected_shape)
        expected_slice = torch.tensor([3130.6763, 4056.5293, 7053.0786] , device=torch_device)
        mean_prediction = outputs.sequences.mean(dim=1)
        self.assertTrue(torch.allclose(mean_prediction[0, -3:] , expected_slice , rtol=1e-1))
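# Hedged usage sketch (added for illustration; not part of the test file): a point
# forecast is obtained from the sampled trajectories by averaging over the sample
# dimension, exactly as the last assertion above does:
#
#   outputs = model.generate(...)                    # (batch, num_parallel_samples, prediction_length)
#   point_forecast = outputs.sequences.mean(dim=1)   # (batch, prediction_length)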
| 19 |
"""simple docstring"""
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from elia_utils import (
embed_questions_for_retrieval,
make_qa_sas_model,
qa_sas_generate,
query_es_index,
query_qa_dense_index,
)
import transformers
from transformers import AutoModel, AutoModelForSeqaSeqLM, AutoTokenizer
MODEL_TYPE = """bart"""
LOAD_DENSE_INDEX = True
@st.cache(allow_output_mutation=True )
def load_models( ) -> Dict:
    """simple docstring"""
    if LOAD_DENSE_INDEX:
        qar_tokenizer = AutoTokenizer.from_pretrained('''yjernite/retribert-base-uncased''' )
        qar_model = AutoModel.from_pretrained('''yjernite/retribert-base-uncased''' ).to('''cuda:0''' )
        qar_model = qar_model.eval()
    else:
        qar_tokenizer , qar_model = (None, None)
    if MODEL_TYPE == "bart":
        sas_tokenizer = AutoTokenizer.from_pretrained('''yjernite/bart_eli5''' )
        sas_model = AutoModelForSeqaSeqLM.from_pretrained('''yjernite/bart_eli5''' ).to('''cuda:0''' )
        save_dict = torch.load('''seq2seq_models/eli5_bart_model_blm_2.pth''' )
        sas_model.load_state_dict(save_dict['''model'''] )
        sas_model = sas_model.eval()
    else:
        sas_tokenizer , sas_model = make_qa_sas_model(
            model_name='''t5-small''', from_file='''seq2seq_models/eli5_t5_model_1024_4.pth''', device='''cuda:0''' )
    return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)
@st.cache(allow_output_mutation=True )
def load_indexes( ) -> Tuple:
    """simple docstring"""
    if LOAD_DENSE_INDEX:
        faiss_res = faiss.StandardGpuResources()
        wikiaab_passages = datasets.load_dataset(path='''wiki_snippets''', name='''wiki40b_en_100_0''' )['''train''']
        wikiaab_passage_reps = np.memmap(
            '''wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat''', dtype='''float32''', mode='''r''', shape=(wikiaab_passages.num_rows, 1_28), )
        wikiaab_index_flat = faiss.IndexFlatIP(1_28 )
        wikiaab_gpu_index_flat = faiss.index_cpu_to_gpu(faiss_res, 1, wikiaab_index_flat )
        wikiaab_gpu_index_flat.add(wikiaab_passage_reps )  # TODO fix for larger GPU
    else:
        wikiaab_passages , wikiaab_gpu_index_flat = (None, None)
    es_client = Elasticsearch([{'''host''': '''localhost''', '''port''': '''9200'''}] )
return (wikiaab_passages, wikiaab_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=True )
def load_train_data( ) -> int:
    """simple docstring"""
    elia = datasets.load_dataset('''eli5''', name='''LFQA_reddit''' )
    elia_train = elia['''train_eli5''']
    elia_train_q_reps = np.memmap(
        '''eli5_questions_reps.dat''', dtype='''float32''', mode='''r''', shape=(elia_train.num_rows, 1_28) )
    eli5_train_q_index = faiss.IndexFlatIP(1_28 )
    eli5_train_q_index.add(elia_train_q_reps )
return (elia_train, eli5_train_q_index)
wikiaab_passages , wikiaab_gpu_index_flat , es_client = load_indexes()
qar_tokenizer , qar_model , sas_tokenizer , sas_model = load_models()
elia_train , eli5_train_q_index = load_train_data()
def find_nearest_training( question , n_results=10 ) -> List[Any]:
    """simple docstring"""
    q_rep = embed_questions_for_retrieval([question], qar_tokenizer, qar_model )
    D , I = eli5_train_q_index.search(q_rep, n_results )
    nn_examples = [elia_train[int(i )] for i in I[0]]
    return nn_examples
def make_support( question , source="wiki40b" , method="dense" , n_results=10 ) -> List[str]:
    """simple docstring"""
    if source == "none":
        support_doc , hit_lst = (''' <P> '''.join(['''''' for _ in range(11 )] ).strip(), [])
    else:
        if method == "dense":
            support_doc , hit_lst = query_qa_dense_index(
                question, qar_model, qar_tokenizer, wikiaab_passages, wikiaab_gpu_index_flat, n_results )
        else:
            support_doc , hit_lst = query_es_index(
                question, es_client, index_name='''english_wiki40b_snippets_100w''', n_results=n_results, )
    support_list = [
        (res['''article_title'''], res['''section_title'''].strip(), res['''score'''], res['''passage_text''']) for res in hit_lst
    ]
    question_doc = '''question: {} context: {}'''.format(question, support_doc )
return question_doc, support_list
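# Hedged illustration (added; not part of the app): the generator is conditioned on a
# single flat string combining the question with the retrieved passages:
#
#   question_doc, support_list = make_support("How do people make chocolate?")
#   # question_doc -> "question: How do people make chocolate? context: <P> ... <P> ..."
#   # support_list -> [(article_title, section_title, score, passage_text), ...]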
@st.cache(
hash_funcs={
torch.Tensor: (lambda __snake_case : None),
transformers.models.bart.tokenization_bart.BartTokenizer: (lambda __snake_case : None),
} )
def answer_question( question_doc , sas_model , sas_tokenizer , min_len=64 , max_len=2_56 , sampling=False , n_beams=2 , top_p=0.95 , temp=0.8 ) -> Dict:
    """simple docstring"""
    with torch.no_grad():
        answer = qa_sas_generate(
            question_doc, sas_model, sas_tokenizer, num_answers=1, num_beams=n_beams, min_len=min_len, max_len=max_len, do_sample=sampling, temp=temp, top_p=top_p, top_k=None, max_input_length=10_24, device='''cuda:0''', )[0]
return (answer, support_list)
st.title("""Long Form Question Answering with ELI5""")
# Start sidebar
header_html = """<img src='https://huggingface.co/front/assets/huggingface_logo.svg'>"""
header_full = """
<html>
<head>
<style>
.img-container {
padding-left: 90px;
padding-right: 90px;
padding-top: 50px;
padding-bottom: 50px;
background-color: #f0f3f9;
}
</style>
</head>
<body>
<span class=\"img-container\"> <!-- Inline parent element -->
%s
</span>
</body>
</html>
""" % (
header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
description = """
This demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).
First, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,
a pre-processed fixed snapshot of Wikipedia.
"""
st.sidebar.markdown(description, unsafe_allow_html=True)
action_list = [
"""Answer the question""",
"""View the retrieved document only""",
"""View the most similar ELI5 question and answer""",
"""Show me everything, please!""",
]
demo_options = st.sidebar.checkbox("""Demo options""")
if demo_options:
    action_st = st.sidebar.selectbox(
        """""",
        action_list,
        index=3,
    )
    action = action_list.index(action_st)
    show_type = st.sidebar.selectbox(
        """""",
        ["""Show full text of passages""", """Show passage section titles"""],
        index=0,
    )
    show_passages = show_type == """Show full text of passages"""
else:
    action = 3
    show_passages = True
retrieval_options = st.sidebar.checkbox("""Retrieval options""")
if retrieval_options:
    retriever_info = """
    ### Information retriever options
    The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding
    trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.
    The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.
    """
    st.sidebar.markdown(retriever_info)
    wiki_source = st.sidebar.selectbox("""Which Wikipedia format should the model use?""", ["""wiki40b""", """none"""])
    index_type = st.sidebar.selectbox("""Which Wikipedia indexer should the model use?""", ["""dense""", """sparse""", """mixed"""])
else:
    wiki_source = """wiki40b"""
    index_type = """dense"""
sampled = """beam"""
n_beams = 2
min_len = 64
max_len = 256
top_p = None
temp = None
generate_options = st.sidebar.checkbox("""Generation options""")
if generate_options:
    generate_info = """
    ### Answer generation options
    The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)
    weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with
    **beam** search, or **sample** from the decoder's output probabilities.
    """
    st.sidebar.markdown(generate_info)
    sampled = st.sidebar.selectbox("""Would you like to use beam search or sample an answer?""", ["""beam""", """sampled"""])
    min_len = st.sidebar.slider(
        """Minimum generation length""", min_value=8, max_value=256, value=64, step=8, format=None, key=None
    )
    max_len = st.sidebar.slider(
        """Maximum generation length""", min_value=64, max_value=512, value=256, step=16, format=None, key=None
    )
    if sampled == "beam":
        n_beams = st.sidebar.slider("""Beam size""", min_value=1, max_value=8, value=2, step=None, format=None, key=None)
    else:
        top_p = st.sidebar.slider(
            """Nucleus sampling p""", min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
        )
        temp = st.sidebar.slider(
            """Temperature""", min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
        )
        n_beams = None
# start main text
questions_list = [
"""<MY QUESTION>""",
"""How do people make chocolate?""",
"""Why do we get a fever when we are sick?""",
"""How can different animals perceive different colors?""",
"""What is natural language processing?""",
"""What's the best way to treat a sunburn?""",
"""What exactly are vitamins ?""",
"""How does nuclear energy provide electricity?""",
"""What's the difference between viruses and bacteria?""",
"""Why are flutes classified as woodwinds when most of them are made out of metal ?""",
"""Why do people like drinking coffee even though it tastes so bad?""",
"""What happens when wine ages? How does it make the wine taste better?""",
"""If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?""",
"""How can we set a date to the beginning or end of an artistic period? Doesn't the change happen gradually?""",
"""How does New Zealand have so many large bird predators?""",
]
question_s = st.selectbox(
"""What would you like to ask? ---- select <MY QUESTION> to enter a new query""",
questions_list,
index=1,
)
if question_s == "<MY QUESTION>":
    question = st.text_input("""Enter your question here:""", """""")
else:
    question = question_s
if st.button("""Show me!"""):
    if action in [0, 1, 3]:
        if index_type == "mixed":
            _ , support_list_dense = make_support(question, source=wiki_source, method="""dense""", n_results=10)
            _ , support_list_sparse = make_support(question, source=wiki_source, method="""sparse""", n_results=10)
            support_list = []
            for res_d, res_s in zip(support_list_dense, support_list_sparse):
                if tuple(res_d) not in support_list:
                    support_list += [tuple(res_d)]
                if tuple(res_s) not in support_list:
                    support_list += [tuple(res_s)]
            support_list = support_list[:10]
            question_doc = """<P> """ + """ <P> """.join([res[-1] for res in support_list])
        else:
            question_doc , support_list = make_support(question, source=wiki_source, method=index_type, n_results=10)
if action in [0, 3]:
        answer , support_list = answer_question(
question_doc,
sas_model,
sas_tokenizer,
min_len=min_len,
max_len=int(max_len),
sampling=(sampled == """sampled"""),
n_beams=n_beams,
top_p=top_p,
temp=temp,
)
st.markdown("""### The model generated answer is:""")
st.write(answer)
if action in [0, 1, 3] and wiki_source != "none":
st.markdown("""--- \n ### The model is drawing information from the following Wikipedia passages:""")
for i, res in enumerate(support_list):
            wiki_url = """https://en.wikipedia.org/wiki/{}""".format(res[0].replace(""" """, """_"""))
            sec_titles = res[1].strip()
            if sec_titles == "":
                sections = """[{}]({})""".format(res[0], wiki_url)
            else:
                sec_list = sec_titles.split(""" & """)
                sections = """ & """.join(
["""[{}]({}#{})""".format(sec.strip(), wiki_url, sec.strip().replace(""" """, """_""")) for sec in sec_list]
)
st.markdown(
"""{0:02d} - **Article**: {1:<18} <br> _Section_: {2}""".format(i + 1, res[0], sections),
unsafe_allow_html=True,
)
if show_passages:
st.write(
"""> <span style=\"font-family:arial; font-size:10pt;\">""" + res[-1] + """</span>""", unsafe_allow_html=True
)
if action in [2, 3]:
        nn_train_list = find_nearest_training(question)
        train_exple = nn_train_list[0]
st.markdown(
"""--- \n ### The most similar question in the ELI5 training set was: \n\n {}""".format(train_exple["""title"""])
)
        answers_st = [
"""{}. {}""".format(i + 1, """ \n""".join([line.strip() for line in ans.split("""\n""") if line.strip() != """"""]))
for i, (ans, sc) in enumerate(zip(train_exple["""answers"""]["""text"""], train_exple["""answers"""]["""score"""]))
if i == 0 or sc > 2
]
st.markdown("""##### Its answers were: \n\n {}""".format("""\n""".join(answers_st)))
disclaimer = """
---
**Disclaimer**
*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.
Evaluating biases of such a model and ensuring factual generations are still very much open research problems.
Therefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*
"""
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
| 19 | 1 |
"""simple docstring"""
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
logger = logging.get_logger(__name__)
class _UpperCAmelCase( BaseImageProcessor ):
    model_input_names = ['pixel_values']
    def __init__( self , do_resize = True , size = None , resample = PILImageResampling.BILINEAR , do_center_crop = True , crop_size = None , do_rescale = True , rescale_factor = 1 / 2_55 , do_normalize = True , image_mean = None , image_std = None , **kwargs , ) -> None:
        '''simple docstring'''
        super().__init__(**kwargs)
        size = size if size is not None else {'''shortest_edge''': 2_56}
        size = get_size_dict(size , default_to_square=False)
        crop_size = crop_size if crop_size is not None else {'''height''': 2_24, '''width''': 2_24}
        crop_size = get_size_dict(crop_size , param_name='''crop_size''')
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize( self , image , size , resample = PILImageResampling.BICUBIC , data_format = None , **kwargs , ) -> np.ndarray:
        '''simple docstring'''
        size = get_size_dict(size , default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(F'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''')
        output_size = get_resize_output_image_size(image , size=size['''shortest_edge'''] , default_to_square=False)
        return resize(image , size=output_size , resample=resample , data_format=data_format , **kwargs)
    def center_crop( self , image , size , data_format = None , **kwargs , ) -> np.ndarray:
        '''simple docstring'''
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(F'''The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}''')
        return center_crop(image , size=(size['''height'''], size['''width''']) , data_format=data_format , **kwargs)
    def rescale( self , image , scale , data_format = None , **kwargs) -> np.ndarray:
        '''simple docstring'''
        return rescale(image , scale=scale , data_format=data_format , **kwargs)
    def normalize( self , image , mean , std , data_format = None , **kwargs , ) -> np.ndarray:
        '''simple docstring'''
        return normalize(image , mean=mean , std=std , data_format=data_format , **kwargs)
    def preprocess( self , images , do_resize = None , size = None , resample = None , do_center_crop = None , crop_size = None , do_rescale = None , rescale_factor = None , do_normalize = None , image_mean = None , image_std = None , return_tensors = None , data_format = ChannelDimension.FIRST , **kwargs , ) -> Dict:
        '''simple docstring'''
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size , default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size , param_name='''crop_size''')
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
                '''torch.Tensor, tf.Tensor or jax.ndarray.''')
        if do_resize and size is None:
            raise ValueError('''Size must be specified if do_resize is True.''')
        if do_center_crop and crop_size is None:
            raise ValueError('''Crop size must be specified if do_center_crop is True.''')
        if do_rescale and rescale_factor is None:
            raise ValueError('''Rescale factor must be specified if do_rescale is True.''')
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('''Image mean and std must be specified if do_normalize is True.''')
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image , size=size , resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image , size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image , scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image , mean=image_mean , std=image_std) for image in images]
        images = [to_channel_dimension_format(image , data_format) for image in images]
        data = {'''pixel_values''': images}
        return BatchFeature(data=data , tensor_type=return_tensors)
    def post_process_semantic_segmentation( self , outputs , target_sizes = None) -> List[Any]:
        '''simple docstring'''
        logits = outputs.logits
        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    '''Make sure that you pass in as many target sizes as the batch dimension of the logits''')
            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()
            semantic_segmentation = []
            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0) , size=target_sizes[idx] , mode='''bilinear''' , align_corners=False)
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
        return semantic_segmentation
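# Hedged usage sketch (added for illustration; not part of the module). The class name
# is obfuscated in this dump, so `ImageProcessorClass` below is a placeholder for it;
# the call pattern is the standard one for HF image processors:
#
#   from PIL import Image
#   processor = ImageProcessorClass()   # placeholder for the class defined above
#   batch = processor(images=Image.open("cat.png"), return_tensors="pt")
#   print(batch["pixel_values"].shape)  # (1, 3, 224, 224) with the defaults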
| 19 |
"""simple docstring"""
import argparse
import fairseq
import torch
from torch import nn
from transformers import (
MBartaaTokenizer,
MBartConfig,
MBartForCausalLM,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """lm_head""",
"""mask_emb""": """masked_spec_embed""",
}
TOP_LEVEL_KEYS = [
"""lm_head""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
]
def set_recursively( hf_pointer , key , value , full_name , weight_type ) -> Tuple:
    """simple docstring"""
    for attribute in key.split('''.''' ):
        hf_pointer = getattr(hf_pointer , attribute )
    if weight_type is not None:
        hf_shape = getattr(hf_pointer , weight_type ).shape
    else:
        hf_shape = hf_pointer.shape
    assert hf_shape == value.shape, (
        F'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
        F''' {value.shape} for {full_name}'''
    )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
logger.info(F'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''' )
def recursively_load_weights( fairseq_model , hf_model ) -> List[str]:
    """simple docstring"""
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor
    adapter = hf_model.adapter
for name, value in fairseq_dict.items():
_UpperCamelCase = False
if "conv_layers" in name:
load_conv_layer(
__snake_case, __snake_case, __snake_case, __snake_case, hf_model.config.feat_extract_norm == '''group''', )
_UpperCamelCase = True
elif any(x in name for x in ['''adaptor''', '''w2v_encoder.proj.''', '''w2v_proj_ln.'''] ):
load_adapter(__snake_case, __snake_case, __snake_case, __snake_case )
_UpperCamelCase = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
_UpperCamelCase = True
if "*" in mapped_key:
_UpperCamelCase = name.split(__snake_case )[0].split('''.''' )[-2]
_UpperCamelCase = mapped_key.replace('''*''', __snake_case )
if "weight_g" in name:
_UpperCamelCase = '''weight_g'''
elif "weight_v" in name:
_UpperCamelCase = '''weight_v'''
elif "bias" in name:
_UpperCamelCase = '''bias'''
elif "weight" in name:
_UpperCamelCase = '''weight'''
else:
_UpperCamelCase = None
set_recursively(__snake_case, __snake_case, __snake_case, __snake_case, __snake_case )
continue
if not is_used:
unused_weights.append(__snake_case )
logger.warning(F'''Unused weights: {unused_weights}''' )
def load_conv_layer( full_name, value, feature_extractor, unused_weights, use_group_norm ) -> List[Any]:
    """simple docstring"""
    name = full_name.split('''conv_layers.''' )[-1]
    items = name.split('''.''' )
    layer_id = int(items[0] )
    type_id = int(items[1] )
    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                F'''{full_name} has size {value.shape}, but'''
                F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                F'''{full_name} has size {value.shape}, but'''
                F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                F'''{full_name} has size {value.shape}, but'''
                F''' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.'''
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(F'''Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.''' )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                F'''{full_name} has size {value.shape}, but'''
                F''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.'''
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
    else:
        unused_weights.append(full_name )
def load_adapter( full_name, value, adapter, unused_weights ) -> Dict:
    """simple docstring"""
    name = full_name.split('''adaptor.''' )[-1]
    items = name.split('''.''' )
    if items[1].isdigit():
        layer_id = int(items[1] )
    else:
        layer_id = None
    if "adaptor" not in full_name:
        if "proj_ln" in full_name:
            # has to be layer norm
            if "bias" in name:
                assert (
                    value.shape == adapter.proj_layer_norm.bias.data.shape
                ), F'''{full_name} has size {value.shape}, but {adapter.proj_layer_norm.bias.data.shape} was found.'''
                adapter.proj_layer_norm.bias.data = value
                logger.info(F'''Adapter proj layer norm bias was initialized from {full_name}.''' )
            if "weight" in name:
                assert (
                    value.shape == adapter.proj_layer_norm.weight.data.shape
                ), F'''{full_name} has size {value.shape}, but {adapter.proj_layer_norm.weight.data.shape} was found.'''
                adapter.proj_layer_norm.weight.data = value
                logger.info(F'''Adapter proj layer norm weight was initialized from {full_name}.''' )
        else:
            # has to be projection layer
            if "bias" in name:
                assert (
                    value.shape == adapter.proj.bias.data.shape
                ), F'''{full_name} has size {value.shape}, but {adapter.proj.bias.data.shape} was found.'''
                adapter.proj.bias.data = value
                logger.info(F'''Adapter proj layer bias was initialized from {full_name}.''' )
            if "weight" in name:
                assert (
                    value.shape == adapter.proj.weight.data.shape
                ), F'''{full_name} has size {value.shape}, but {adapter.proj.weight.data.shape} was found.'''
                adapter.proj.weight.data = value
                logger.info(F'''Adapter proj layer weight was initialized from {full_name}.''' )
    elif isinstance(layer_id, int ):
        if "bias" in name:
            assert (
                value.shape == adapter.layers[layer_id].conv.bias.data.shape
            ), F'''{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.bias.data.shape} was found.'''
            adapter.layers[layer_id].conv.bias.data = value
            logger.info(F'''Adapter layer {layer_id} bias was initialized from {full_name}.''' )
        elif "weight" in name:
            assert (
                value.shape == adapter.layers[layer_id].conv.weight.data.shape
            ), F'''{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.weight.data.shape} was found.'''
            adapter.layers[layer_id].conv.weight.data = value
            logger.info(F'''Adapter layer {layer_id} weight was initialized from {full_name}.''' )
    else:
        unused_weights.append(full_name )
def make_linear_from_emb( emb ) -> str:
    """simple docstring"""
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False )
    lin_layer.weight.data = emb.weight.data
    return lin_layer
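# Note: make_linear_from_emb implements classic weight tying: the Linear layer
# reuses the embedding matrix, so logits are computed as hidden_states @ emb.weight.T.
# A quick sanity check with a toy embedding (sizes are arbitrary):
#
#   emb = nn.Embedding(2_50_54, 16)
#   lm_head = make_linear_from_emb(emb)
#   assert lm_head.weight.shape == emb.weight.shape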
@torch.no_grad()
def convert_wavaveca_checkpoint( checkpoint_path, pytorch_dump_folder_path, dict_path, config_yaml_path, encoder_config_path, decoder_config_path, add_adapter, adapter_kernel_size, adapter_stride, decoder_start_token_id, encoder_output_dim, ) -> Optional[Any]:
    """simple docstring"""
    encoder_config = WavaVecaConfig.from_pretrained(
        encoder_config_path, add_adapter=add_adapter, adapter_stride=adapter_stride, adapter_kernel_size=adapter_kernel_size, use_auth_token=True, output_hidden_size=encoder_output_dim, )
    decoder_config = MBartConfig.from_pretrained(decoder_config_path )
    # load model
    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={
            '''config_yaml''': config_yaml_path,
            '''data''': '''/'''.join(dict_path.split('''/''' )[:-1] ),
            '''w2v_path''': checkpoint_path,
            '''load_pretrained_decoder_from''': None,
        }, )
    model = model[0].eval()
    # load feature extractor
    feature_extractor = WavaVecaFeatureExtractor.from_pretrained(encoder_config_path, use_auth_token=True )
    # set weights for wav2vec2 encoder
    hf_encoder = WavaVecaModel(encoder_config )
    recursively_load_weights_wavaveca(model.encoder, hf_encoder )
    # load decoder weights
    hf_decoder = MBartForCausalLM(decoder_config )
    missing_keys, unexpected_keys = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict(), strict=False )
    logger.warning(F'''The following keys are missing when loading the decoder weights: {missing_keys}''' )
    logger.warning(F'''The following keys are unexpected when loading the decoder weights: {unexpected_keys}''' )
    hf_wavavec = SpeechEncoderDecoderModel(encoder=hf_encoder, decoder=hf_decoder )
    hf_wavavec.config.tie_word_embeddings = False
    tokenizer = MBartaaTokenizer(dict_path )
    tokenizer.save_pretrained(pytorch_dump_folder_path )
    config = hf_wavavec.config.to_dict()
    config['''pad_token_id'''] = tokenizer.pad_token_id
    config['''bos_token_id'''] = tokenizer.bos_token_id
    config['''eos_token_id'''] = tokenizer.eos_token_id
    config['''tokenizer_class'''] = '''mbart50'''
    config['''feature_extractor_type'''] = '''wav2vec2'''
    config['''decoder_start_token_id'''] = tokenizer.eos_token_id
    config['''forced_bos_token_id'''] = 25_00_04
    config['''forced_eos_token_id'''] = tokenizer.eos_token_id
    hf_wavavec.config = SpeechEncoderDecoderConfig.from_dict(config )
    hf_wavavec.save_pretrained(pytorch_dump_folder_path )
    feature_extractor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_yaml_path""", default=None, type=str, help="""Path to yaml file of fine-tuned model""")
parser.add_argument(
"""--encoder_config_path""",
default="""facebook/wav2vec2-xls-r-1b""",
type=str,
help="""Path to hf encoder wav2vec2 checkpoint config""",
)
parser.add_argument(
"""--decoder_config_path""",
default="""facebook/mbart-large-50-one-to-many-mmt""",
type=str,
help="""Path to hf decoder checkpoint config""",
)
parser.add_argument("""--add_adapter""", default=True, type=bool, help="""whethere to add model adapter layers""")
parser.add_argument("""--adapter_stride""", default=2, type=int, help="""stride of adapter layers""")
parser.add_argument("""--adapter_kernel_size""", default=3, type=int, help="""kernel size of adapter layers""")
parser.add_argument("""--encoder_output_dim""", default=1024, type=int, help="""encoder output dim""")
parser.add_argument("""--start_token_id""", default=25_0004, type=int, help="""`decoder_start_token_id` of model config""")
    args = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
args.config_yaml_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
add_adapter=args.add_adapter,
adapter_kernel_size=args.adapter_kernel_size,
adapter_stride=args.adapter_stride,
decoder_start_token_id=args.start_token_id,
encoder_output_dim=args.encoder_output_dim,
)
| 19 | 1 |
"""simple docstring"""
import os
import tempfile
import unittest
from transformers import NezhaConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
)
from transformers.models.nezha.modeling_nezha import NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST
class NezhaModelTester:
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=1_28 , max_relative_position=32 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ) -> List[str]:
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs( self) -> Optional[Any]:
'''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
            choice_labels = ids_tensor([self.batch_size] , self.num_choices)
        config = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config( self) -> Any:
'''simple docstring'''
return NezhaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__a , initializer_range=self.initializer_range , )
    def prepare_config_and_inputs_for_decoder( self) -> Optional[int]:
'''simple docstring'''
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2)
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
    def create_and_check_model( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels) -> Union[str, Any]:
        '''simple docstring'''
        model = NezhaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids)
        result = model(input_ids , token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size))
    def create_and_check_model_as_decoder( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , encoder_hidden_states , encoder_attention_mask , ) -> Dict:
        '''simple docstring'''
        config.add_cross_attention = True
        model = NezhaModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_attention_mask , )
        result = model(
            input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , encoder_hidden_states=encoder_hidden_states , )
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids)
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size))
    def create_and_check_for_masked_lm( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels) -> Dict:
        '''simple docstring'''
        model = NezhaForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels)
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_for_next_sequence_prediction( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels) -> Any:
        '''simple docstring'''
        model = NezhaForNextSentencePrediction(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=sequence_labels , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, 2))
    def create_and_check_for_pretraining( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels) -> Dict:
        '''simple docstring'''
        model = NezhaForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels , next_sentence_label=sequence_labels , )
        self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2))
    def create_and_check_for_question_answering( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels) -> List[Any]:
        '''simple docstring'''
        model = NezhaForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , start_positions=sequence_labels , end_positions=sequence_labels , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
    def create_and_check_for_sequence_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels) -> Dict:
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = NezhaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
    def create_and_check_for_token_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels) -> List[str]:
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = NezhaForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels)
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_for_multiple_choice( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels) -> Union[str, Any]:
        '''simple docstring'''
        config.num_choices = self.num_choices
        model = NezhaForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
        result = model(
            multiple_choice_inputs_ids , attention_mask=multiple_choice_input_mask , token_type_ids=multiple_choice_token_type_ids , labels=choice_labels , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common( self) -> Tuple:
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class NezhaModelTest( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
(
NezhaModel,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
'feature-extraction': NezhaModel,
'fill-mask': NezhaForMaskedLM,
'question-answering': NezhaForQuestionAnswering,
'text-classification': NezhaForSequenceClassification,
'token-classification': NezhaForTokenClassification,
'zero-shot': NezhaForSequenceClassification,
}
if is_torch_available()
else {}
)
    fx_compatible = True
    def _prepare_for_class( self , inputs_dict , model_class , return_labels=False) -> str:
        '''simple docstring'''
        inputs_dict = super()._prepare_for_class(inputs_dict , model_class , return_labels=return_labels)
        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict['''labels'''] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=torch_device)
                inputs_dict['''next_sentence_label'''] = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=torch_device)
return inputs_dict
    def setUp( self) -> int:
        '''simple docstring'''
        self.model_tester = NezhaModelTester(self)
        self.config_tester = ConfigTester(self , config_class=NezhaConfig , hidden_size=37)
def UpperCAmelCase ( self) -> Any:
'''simple docstring'''
self.config_tester.run_common_tests()
def UpperCAmelCase ( self) -> str:
'''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def UpperCAmelCase ( self) -> Optional[int]:
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)
def UpperCAmelCase ( self) -> Any:
'''simple docstring'''
# This regression test was failing with PyTorch < 1.3
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()
        encoder_hidden_states = None
        self.model_tester.create_and_check_model_as_decoder(
            config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , encoder_hidden_states , encoder_attention_mask , )
    def UpperCAmelCase ( self) -> Any:
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)
    def UpperCAmelCase ( self) -> Optional[int]:
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)
    def UpperCAmelCase ( self) -> Tuple:
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_next_sequence_prediction(*config_and_inputs)
    def UpperCAmelCase ( self) -> Tuple:
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)
    def UpperCAmelCase ( self) -> str:
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)
    def UpperCAmelCase ( self) -> Any:
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)
    def UpperCAmelCase ( self) -> List[str]:
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
@slow
def UpperCAmelCase ( self) -> Union[str, Any]:
'''simple docstring'''
for model_name in NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = NezhaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@slow
@require_torch_gpu
def UpperCAmelCase ( self) -> Tuple:
'''simple docstring'''
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # NezhaForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == NezhaForMultipleChoice:
                return
            config.torchscript = True
            model = model_class(config=config)
            inputs_dict = self._prepare_for_class(inputs_dict , model_class)
            traced_model = torch.jit.trace(
                model , (inputs_dict['''input_ids'''].to('''cpu'''), inputs_dict['''attention_mask'''].to('''cpu''')))
            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model , os.path.join(tmp , '''bert.pt'''))
                loaded = torch.jit.load(os.path.join(tmp , '''bert.pt''') , map_location=torch_device)
                loaded(inputs_dict['''input_ids'''].to(torch_device) , inputs_dict['''attention_mask'''].to(torch_device))
@require_torch
class NezhaModelIntegrationTest( unittest.TestCase ):
@slow
def UpperCAmelCase ( self) -> Union[str, Any]:
'''simple docstring'''
        model = NezhaModel.from_pretrained('''sijunhe/nezha-cn-base''')
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids , attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 6, 7_68))
        self.assertEqual(output.shape , expected_shape)
        expected_slice = torch.tensor([[[0.0685, 0.2441, 0.1102], [0.0600, 0.1906, 0.1349], [0.0221, 0.0819, 0.0586]]])
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , expected_slice , atol=1e-4))
@slow
def UpperCAmelCase ( self) -> Tuple:
'''simple docstring'''
        model = NezhaForMaskedLM.from_pretrained('''sijunhe/nezha-cn-base''')
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
        attention_mask = torch.tensor([[1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids , attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 6, 2_11_28))
        self.assertEqual(output.shape , expected_shape)
        expected_slice = torch.tensor(
            [[-2.7939, -1.7902, -2.2189], [-2.8585, -1.8908, -2.3723], [-2.6499, -1.7750, -2.2558]])
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , expected_slice , atol=1e-4))
| 19 |
"""simple docstring"""
import fire
from utils import calculate_rouge, save_json
def calculate_rouge_path( pred_path, tgt_path, save_path=None, **rouge_kwargs ) -> Optional[int]:
    """simple docstring"""
    pred_lns = [x.strip() for x in open(pred_path ).readlines()]
    tgt_lns = [x.strip() for x in open(tgt_path ).readlines()][: len(pred_lns )]
    metrics = calculate_rouge(pred_lns, tgt_lns, **rouge_kwargs )
    if save_path is not None:
        save_json(metrics, save_path, indent=None )
    return metrics  # these print nicely
if __name__ == "__main__":
fire.Fire(calculate_rouge_path)
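# Example invocation through python-fire (file names are hypothetical):
#
#   python rouge_cli.py predictions.txt references.txt --save_path rouge.json
#
# Any extra flags are forwarded to calculate_rouge via **rouge_kwargs.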
| 19 | 1 |
"""simple docstring"""
# Logistic Regression from scratch
# importing all the required libraries
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets
def sigmoid_function( z ) -> Any:
    """simple docstring"""
    return 1 / (1 + np.exp(-z ))
def cost_function( h, y ) -> Optional[Any]:
    """simple docstring"""
    return (-y * np.log(h ) - (1 - y) * np.log(1 - h )).mean()
def log_likelihood( x, y, weights ) -> List[str]:
    """simple docstring"""
    scores = np.dot(x, weights )
    return np.sum(y * scores - np.log(1 + np.exp(scores ) ) )
def logistic_reg( alpha, x, y, max_iterations=7_00_00 ) -> int:
    """simple docstring"""
    theta = np.zeros(x.shape[1] )
    for iterations in range(max_iterations ):
        z = np.dot(x, theta )
        h = sigmoid_function(z )
        gradient = np.dot(x.T, h - y ) / y.size
        theta = theta - alpha * gradient  # updating the weights
        z = np.dot(x, theta )
        h = sigmoid_function(z )
        j = cost_function(h, y )
        if iterations % 1_00 == 0:
            print(F'''loss: {j} \t''' )  # printing the loss after every 100 iterations
    return theta
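# Derivation note for the update above: with h = sigmoid_function(np.dot(x, theta)),
# the mean binary cross-entropy J(theta) = mean(-y*log(h) - (1 - y)*log(1 - h))
# has gradient dJ/dtheta = np.dot(x.T, h - y) / y.size, which is exactly the
# `gradient` computed inside the loop.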
if __name__ == "__main__":
    iris = datasets.load_iris()
    x = iris.data[:, :2]
    y = (iris.target != 0) * 1
    alpha = 0.1
    theta = logistic_reg(alpha, x, y, max_iterations=7_0000)
    print("""theta: """, theta)  # printing the theta, i.e. our weights vector
def predict_prob( x ) -> str:
    """simple docstring"""
    return sigmoid_function(
        np.dot(x, theta ) )  # predicting the value of probability from the logistic regression algorithm
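# predict_prob returns P(y=1 | x); the 0.5 contour drawn below is therefore the
# decision boundary, i.e. the set of points where np.dot(x, theta) == 0.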
plt.figure(figsize=(10, 6))
plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color="""b""", label="""0""")
plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color="""r""", label="""1""")
(x1_min, x1_max) = (x[:, 0].min(), x[:, 0].max())
(x2_min, x2_max) = (x[:, 1].min(), x[:, 1].max())
(xx1, xx2) = np.meshgrid(np.linspace(x1_min, x1_max), np.linspace(x2_min, x2_max))
grid = np.c_[xx1.ravel(), xx2.ravel()]
probs = predict_prob(grid).reshape(xx1.shape)
plt.contour(xx1, xx2, probs, [0.5], linewidths=1, colors="""black""")
plt.legend()
plt.show()
| 19 |
"""simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class CLIPSegProcessor( ProcessorMixin ):
    attributes = ['image_processor', 'tokenizer']
    image_processor_class = 'ViTImageProcessor'
    tokenizer_class = ('CLIPTokenizer', 'CLIPTokenizerFast')
    def __init__( self , image_processor=None , tokenizer=None , **kwargs) -> Union[str, Any]:
        '''simple docstring'''
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
                ''' instead.''' , FutureWarning , )
            feature_extractor = kwargs.pop('''feature_extractor''')
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('''You need to specify an `image_processor`.''')
        if tokenizer is None:
            raise ValueError('''You need to specify a `tokenizer`.''')
        super().__init__(image_processor , tokenizer)
    def __call__( self , text=None , visual_prompt=None , images=None , return_tensors=None , **kwargs) -> Tuple:
        '''simple docstring'''
        if text is None and visual_prompt is None and images is None:
            raise ValueError('''You have to specify either text, visual prompt or images.''')
        if text is not None and visual_prompt is not None:
            raise ValueError('''You have to specify exactly one type of prompt. Either text or visual prompt.''')
        if text is not None:
            encoding = self.tokenizer(text , return_tensors=return_tensors , **kwargs)
        if visual_prompt is not None:
            prompt_features = self.image_processor(visual_prompt , return_tensors=return_tensors , **kwargs)
        if images is not None:
            image_features = self.image_processor(images , return_tensors=return_tensors , **kwargs)
        if visual_prompt is not None and images is not None:
            encoding = {
                '''pixel_values''': image_features.pixel_values,
                '''conditional_pixel_values''': prompt_features.pixel_values,
            }
            return encoding
        elif text is not None and images is not None:
            encoding['''pixel_values'''] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        elif visual_prompt is not None:
            encoding = {
                '''conditional_pixel_values''': prompt_features.pixel_values,
            }
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features) , tensor_type=return_tensors)
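    # A minimal usage sketch (the image/prompt variables are illustrative, and
    # the call signatures follow the standard processor API):
    #
    #   processor = CLIPSegProcessor(image_processor=image_processor, tokenizer=tokenizer)
    #   # text prompts + images -> input_ids / attention_mask / pixel_values
    #   enc = processor(text=["a cat"], images=[image], return_tensors="pt")
    #   # visual prompt + images -> pixel_values / conditional_pixel_values
    #   enc = processor(visual_prompt=[prompt_image], images=[image], return_tensors="pt")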
    def batch_decode( self , *args , **kwargs) -> Any:
        '''simple docstring'''
        return self.tokenizer.batch_decode(*args , **kwargs)
    def decode( self , *args , **kwargs) -> List[str]:
        '''simple docstring'''
        return self.tokenizer.decode(*args , **kwargs)
    @property
    def feature_extractor_class( self) -> List[str]:
        '''simple docstring'''
        warnings.warn(
            '''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , FutureWarning , )
        return self.image_processor_class
    @property
    def feature_extractor( self) -> List[str]:
        '''simple docstring'''
        warnings.warn(
            '''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , FutureWarning , )
        return self.image_processor
| 19 | 1 |
"""simple docstring"""
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import SeqaSeqTrainer
from seqaseq_training_args import SeqaSeqTrainingArguments
import transformers
from transformers import (
AutoConfig,
AutoModelForSeqaSeqLM,
AutoTokenizer,
HfArgumentParser,
MBartTokenizer,
MBartTokenizerFast,
set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
SeqaSeqDataCollator,
SeqaSeqDataset,
assert_all_frozen,
build_compute_metrics_fn,
check_output_dir,
freeze_embeds,
freeze_params,
lmap,
save_json,
use_task_specific_params,
write_txt_file,
)
logger = logging.getLogger(__name__)
@dataclass
class _UpperCAmelCase:
lowercase__ = field(
metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
lowercase__ = field(
default=lowerCamelCase , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
lowercase__ = field(
default=lowerCamelCase , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
lowercase__ = field(
default=lowerCamelCase , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
    lowercase__ = field(default=lowerCamelCase , metadata={'help': 'Whether to freeze the encoder.'} )
lowercase__ = field(default=lowerCamelCase , metadata={'help': 'Whether to freeze the embeddings.'} )
@dataclass
class _UpperCAmelCase:
lowercase__ = field(
metadata={'help': 'The input data dir. Should contain the .tsv files (or other data files) for the task.'} )
lowercase__ = field(
default='summarization' , metadata={'help': 'Task name, summarization (or summarization_{dataset} for pegasus) or translation'} , )
lowercase__ = field(
default=10_24 , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
lowercase__ = field(
default=1_28 , metadata={
'help': (
'The maximum total sequence length for target text after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
lowercase__ = field(
default=1_42 , metadata={
'help': (
'The maximum total sequence length for validation target text after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded. '
'This argument is also used to override the ``max_length`` param of ``model.generate``, which is used '
'during ``evaluate`` and ``predict``.'
)
} , )
lowercase__ = field(
default=1_42 , metadata={
'help': (
'The maximum total sequence length for test target text after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
lowercase__ = field(default=-1 , metadata={'help': '# training examples. -1 means use all.'} )
lowercase__ = field(default=-1 , metadata={'help': '# validation examples. -1 means use all.'} )
lowercase__ = field(default=-1 , metadata={'help': '# test examples. -1 means use all.'} )
lowercase__ = field(default=lowerCamelCase , metadata={'help': 'Source language id for translation.'} )
lowercase__ = field(default=lowerCamelCase , metadata={'help': 'Target language id for translation.'} )
lowercase__ = field(default=lowerCamelCase , metadata={'help': '# num_beams to use for evaluation.'} )
lowercase__ = field(
default=lowerCamelCase , metadata={'help': 'If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined.'} , )
def handle_metrics( split, metrics, output_dir ) -> int:
    """simple docstring"""
    logger.info(F'''***** {split} metrics *****''' )
    for key in sorted(metrics.keys() ):
        logger.info(F''' {key} = {metrics[key]}''' )
    save_json(metrics, os.path.join(output_dir, F'''{split}_results.json''' ) )
def main( ) -> Optional[Any]:
    """simple docstring"""
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args , data_args , training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        model_args , data_args , training_args = parser.parse_args_into_dataclasses()
    check_output_dir(training_args )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN, )
logger.warning(
'''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''', training_args.local_rank, training_args.device, training_args.n_gpu, bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ), training_args.fpaa, )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
    logger.info('''Training/evaluation parameters %s''', training_args )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, )
    extra_model_params = ('''encoder_layerdrop''', '''decoder_layerdrop''', '''dropout''', '''attention_dropout''')
    for p in extra_model_params:
        if getattr(training_args, p, None ):
            assert hasattr(config, p ), F'''({config.__class__.__name__}) doesn\'t have a `{p}` attribute'''
            setattr(config, p, getattr(training_args, p ) )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, )
    model = AutoModelForSeqaSeqLM.from_pretrained(
        model_args.model_name_or_path, from_tf='''.ckpt''' in model_args.model_name_or_path, config=config, cache_dir=model_args.cache_dir, )
    # use task specific params
    use_task_specific_params(model, data_args.task )
    # set num_beams for evaluation
    if data_args.eval_beams is None:
        data_args.eval_beams = model.config.num_beams
# set decoder_start_token_id for MBart
    if model.config.decoder_start_token_id is None and isinstance(tokenizer, (MBartTokenizer, MBartTokenizerFast) ):
        assert (
            data_args.tgt_lang is not None and data_args.src_lang is not None
        ), "mBart requires --tgt_lang and --src_lang"
        if isinstance(tokenizer, MBartTokenizer ):
            model.config.decoder_start_token_id = tokenizer.lang_code_to_id[data_args.tgt_lang]
        else:
            model.config.decoder_start_token_id = tokenizer.convert_tokens_to_ids(data_args.tgt_lang )
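    # mBART starts decoding from the *target language code* rather than a plain
    # BOS token, which is why decoder_start_token_id is set to the tgt_lang id above.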
    if model_args.freeze_embeds:
        freeze_embeds(model )
    if model_args.freeze_encoder:
        freeze_params(model.get_encoder() )
        assert_all_frozen(model.get_encoder() )
    dataset_class = SeqaSeqDataset
# Get datasets
    train_dataset = (
        dataset_class(
            tokenizer, type_path='''train''', data_dir=data_args.data_dir, n_obs=data_args.n_train, max_target_length=data_args.max_target_length, max_source_length=data_args.max_source_length, prefix=model.config.prefix or '''''', )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        dataset_class(
            tokenizer, type_path='''val''', data_dir=data_args.data_dir, n_obs=data_args.n_val, max_target_length=data_args.val_max_target_length, max_source_length=data_args.max_source_length, prefix=model.config.prefix or '''''', )
        if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
        else None
    )
    test_dataset = (
        dataset_class(
            tokenizer, type_path='''test''', data_dir=data_args.data_dir, n_obs=data_args.n_test, max_target_length=data_args.test_max_target_length, max_source_length=data_args.max_source_length, prefix=model.config.prefix or '''''', )
        if training_args.do_predict
        else None
    )
# Initialize our Trainer
    compute_metrics_fn = (
        build_compute_metrics_fn(data_args.task, tokenizer ) if training_args.predict_with_generate else None
    )
    trainer = SeqaSeqTrainer(
        model=model, args=training_args, data_args=data_args, train_dataset=train_dataset, eval_dataset=eval_dataset, data_collator=SeqaSeqDataCollator(
            tokenizer, data_args, model.config.decoder_start_token_id, training_args.tpu_num_cores ), compute_metrics=compute_metrics_fn, tokenizer=tokenizer, )
    all_metrics = {}
# Training
if training_args.do_train:
logger.info('''*** Train ***''' )
        train_result = trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
        metrics = train_result.metrics
        metrics['''train_n_objs'''] = data_args.n_train
        trainer.save_model()  # this also saves the tokenizer
        if trainer.is_world_process_zero():
            handle_metrics('''train''', metrics, training_args.output_dir )
            all_metrics.update(metrics )
            # Need to save the state, since Trainer.save_model saves only the tokenizer with the model
            trainer.state.save_to_json(os.path.join(training_args.output_dir, '''trainer_state.json''' ) )
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
        metrics = trainer.evaluate(metric_key_prefix='''val''' )
        metrics['''val_n_objs'''] = data_args.n_val
        metrics['''val_loss'''] = round(metrics['''val_loss'''], 4 )
        if trainer.is_world_process_zero():
            handle_metrics('''val''', metrics, training_args.output_dir )
            all_metrics.update(metrics )
if training_args.do_predict:
logger.info('''*** Predict ***''' )
        test_output = trainer.predict(test_dataset=test_dataset, metric_key_prefix='''test''' )
        metrics = test_output.metrics
        metrics['''test_n_objs'''] = data_args.n_test
        if trainer.is_world_process_zero():
            metrics['''test_loss'''] = round(metrics['''test_loss'''], 4 )
            handle_metrics('''test''', metrics, training_args.output_dir )
            all_metrics.update(metrics )
            if training_args.predict_with_generate:
                test_preds = tokenizer.batch_decode(
                    test_output.predictions, skip_special_tokens=True, clean_up_tokenization_spaces=True )
                test_preds = lmap(str.strip, test_preds )
                write_txt_file(test_preds, os.path.join(training_args.output_dir, '''test_generations.txt''' ) )
    if trainer.is_world_process_zero():
        save_json(all_metrics, os.path.join(training_args.output_dir, '''all_results.json''' ) )
return all_metrics
def _mp_fn( index ) -> List[str]:
    """simple docstring"""
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
| 19 |
"""simple docstring"""
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import UperNetForSemanticSegmentation
from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UperNetModelTester:
    def __init__( self , parent , batch_size=13 , image_size=32 , num_channels=3 , num_stages=4 , hidden_sizes=[10, 20, 30, 40] , depths=[2, 2, 3, 2] , is_training=True , use_labels=True , intermediate_size=37 , hidden_act="gelu" , type_sequence_label_size=10 , initializer_range=0.02 , out_features=["stage2", "stage3", "stage4"] , num_labels=3 , scope=None , ) -> Optional[Any]:
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = num_stages
    def prepare_config_and_inputs( self) -> List[str]:
        '''simple docstring'''
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size)
        config = self.get_config()
return config, pixel_values, labels
    def get_backbone_config( self) -> Any:
'''simple docstring'''
return ConvNextConfig(
num_channels=self.num_channels , num_stages=self.num_stages , hidden_sizes=self.hidden_sizes , depths=self.depths , is_training=self.is_training , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , out_features=self.out_features , )
    def get_config( self) -> Optional[int]:
'''simple docstring'''
        return UperNetConfig(
            backbone_config=self.get_backbone_config() , hidden_size=5_12 , pool_scales=[1, 2, 3, 6] , use_auxiliary_head=True , auxiliary_loss_weight=0.4 , auxiliary_in_channels=40 , auxiliary_channels=2_56 , auxiliary_num_convs=1 , auxiliary_concat_input=False , loss_ignore_index=2_55 , num_labels=self.num_labels , )
    def create_and_check_for_semantic_segmentation( self , config , pixel_values , labels) -> Optional[Any]:
        '''simple docstring'''
        model = UperNetForSemanticSegmentation(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size))
    def prepare_config_and_inputs_for_common( self) -> Dict:
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            pixel_values,
            labels,
        ) = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class UperNetModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (UperNetForSemanticSegmentation,) if is_torch_available() else ()
    pipeline_model_mapping = {'image-segmentation': UperNetForSemanticSegmentation} if is_torch_available() else {}
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_torchscript = False
    has_attentions = False
    def setUp( self) -> Union[str, Any]:
        '''simple docstring'''
        self.model_tester = UperNetModelTester(self)
        self.config_tester = ConfigTester(self , config_class=UperNetConfig , has_text_modality=False , hidden_size=37)
def UpperCAmelCase ( self) -> Dict:
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCAmelCase ( self) -> List[Any]:
'''simple docstring'''
return
def UpperCAmelCase ( self) -> str:
'''simple docstring'''
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] , expected_arg_names)
def UpperCAmelCase ( self) -> Optional[int]:
'''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)
@unittest.skip(reason='''UperNet does not use inputs_embeds''')
def UpperCAmelCase ( self) -> Any:
'''simple docstring'''
pass
@unittest.skip(reason='''UperNet does not support input and output embeddings''')
def UpperCAmelCase ( self) -> List[Any]:
'''simple docstring'''
pass
@unittest.skip(reason='''UperNet does not have a base model''')
def UpperCAmelCase ( self) -> Tuple:
'''simple docstring'''
pass
@unittest.skip(reason='''UperNet does not have a base model''')
def UpperCAmelCase ( self) -> int:
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(reason='''UperNet has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''')
def UpperCAmelCase ( self) -> Union[str, Any]:
'''simple docstring'''
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''')
def UpperCAmelCase ( self) -> Any:
'''simple docstring'''
pass
def UpperCAmelCase ( self) -> List[Any]:
'''simple docstring'''
        def check_hidden_states_output(inputs_dict , config , model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class))
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states) , expected_num_stages + 1)
            # ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict['''output_hidden_states'''] = True
            check_hidden_states_output(inputs_dict , config , model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict , config , model_class)
def UpperCAmelCase ( self) -> Optional[Any]:
'''simple docstring'''
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config)
        configs_no_init.backbone_config = _config_zero_init(configs_no_init.backbone_config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@unittest.skip(reason='''UperNet does not have tied weights''')
def UpperCAmelCase ( self) -> Optional[int]:
'''simple docstring'''
pass
@slow
def UpperCAmelCase ( self) -> Optional[Any]:
'''simple docstring'''
for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = UperNetForSemanticSegmentation.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img( ) -> int:
    """simple docstring"""
    filepath = hf_hub_download(
        repo_id='''hf-internal-testing/fixtures_ade20k''', repo_type='''dataset''', filename='''ADE_val_00000001.jpg''' )
    image = Image.open(filepath ).convert('''RGB''' )
    return image
@require_torch
@require_vision
@slow
class UperNetModelIntegrationTest( unittest.TestCase ):
def UpperCAmelCase ( self) -> Optional[int]:
'''simple docstring'''
        processor = AutoImageProcessor.from_pretrained('''openmmlab/upernet-swin-tiny''')
        model = UperNetForSemanticSegmentation.from_pretrained('''openmmlab/upernet-swin-tiny''').to(torch_device)
        image = prepare_img()
        inputs = processor(images=image , return_tensors='''pt''').to(torch_device)
        with torch.no_grad():
            outputs = model(**inputs)
        expected_shape = torch.Size((1, model.config.num_labels, 5_12, 5_12))
        self.assertEqual(outputs.logits.shape , expected_shape)
        expected_slice = torch.tensor(
            [[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] , expected_slice , atol=1e-4))
def UpperCAmelCase ( self) -> List[Any]:
'''simple docstring'''
        processor = AutoImageProcessor.from_pretrained('''openmmlab/upernet-convnext-tiny''')
        model = UperNetForSemanticSegmentation.from_pretrained('''openmmlab/upernet-convnext-tiny''').to(torch_device)
        image = prepare_img()
        inputs = processor(images=image , return_tensors='''pt''').to(torch_device)
        with torch.no_grad():
            outputs = model(**inputs)
        expected_shape = torch.Size((1, model.config.num_labels, 5_12, 5_12))
        self.assertEqual(outputs.logits.shape , expected_shape)
        expected_slice = torch.tensor(
            [[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] , expected_slice , atol=1e-4))
| 19 | 1 |
"""simple docstring"""
import os
from pathlib import Path
import numpy as np
import pytest
from pack_dataset import pack_data_dir
from parameterized import parameterized
from save_len_file import save_len_file
from torch.utils.data import DataLoader
from transformers import AutoTokenizer
from transformers.models.mbart.modeling_mbart import shift_tokens_right
from transformers.testing_utils import TestCasePlus, slow
from utils import FAIRSEQ_AVAILABLE, DistributedSortishSampler, LegacySeqaSeqDataset, SeqaSeqDataset
_a = """bert-base-cased"""
_a = """google/pegasus-xsum"""
_a = [""" Sam ate lunch today.""", """Sams lunch ingredients."""]
_a = ["""A very interesting story about what I ate for lunch.""", """Avocado, celery, turkey, coffee"""]
_a = """patrickvonplaten/t5-tiny-random"""
_a = """sshleifer/bart-tiny-random"""
_a = """sshleifer/tiny-mbart"""
_a = """sshleifer/tiny-marian-en-de"""
def _dump_articles( path, articles ) -> Any:
    """simple docstring"""
    content = '''\n'''.join(articles )
    Path(path ).open('''w''' ).writelines(content )
def make_test_data_dir( tmp_dir ) -> Union[str, Any]:
    """simple docstring"""
    for split in ["train", "val", "test"]:
        _dump_articles(os.path.join(tmp_dir, F'''{split}.source''' ), ARTICLES )
        _dump_articles(os.path.join(tmp_dir, F'''{split}.target''' ), SUMMARIES )
    return tmp_dir
class TestAll( TestCasePlus ):
@parameterized.expand(
[
MBART_TINY,
MARIAN_TINY,
T5_TINY,
BART_TINY,
PEGASUS_XSUM,
] , )
@slow
    def UpperCAmelCase ( self , tok_name) -> Dict:
        '''simple docstring'''
        tokenizer = AutoTokenizer.from_pretrained(tok_name)
        tmp_dir = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir())
        max_len_source = max(len(tokenizer.encode(a)) for a in ARTICLES)
        max_len_target = max(len(tokenizer.encode(a)) for a in SUMMARIES)
        max_src_len = 4
        max_tgt_len = 8
        assert max_len_target > max_src_len  # Will be truncated
        assert max_len_source > max_src_len  # Will be truncated
        src_lang , tgt_lang = '''ro_RO''', '''de_DE'''  # ignored for all but mbart, but never causes error.
        train_dataset = SeqaSeqDataset(
            tokenizer , data_dir=tmp_dir , type_path='''train''' , max_source_length=max_src_len , max_target_length=max_tgt_len , src_lang=src_lang , tgt_lang=tgt_lang , )
        dataloader = DataLoader(train_dataset , batch_size=2 , collate_fn=train_dataset.collate_fn)
        for batch in dataloader:
            assert isinstance(batch , dict)
            assert batch["attention_mask"].shape == batch["input_ids"].shape
            # show that articles were trimmed.
            assert batch["input_ids"].shape[1] == max_src_len
            # show that targets are the same len
            assert batch["labels"].shape[1] == max_tgt_len
            if tok_name != MBART_TINY:
                continue
            # check language codes in correct place
            batch['''decoder_input_ids'''] = shift_tokens_right(batch['''labels'''] , tokenizer.pad_token_id)
            assert batch["decoder_input_ids"][0, 0].item() == tokenizer.lang_code_to_id[tgt_lang]
            assert batch["decoder_input_ids"][0, -1].item() == tokenizer.eos_token_id
            assert batch["input_ids"][0, -2].item() == tokenizer.eos_token_id
            assert batch["input_ids"][0, -1].item() == tokenizer.lang_code_to_id[src_lang]
            break  # No need to test every batch
@parameterized.expand([BART_TINY, BERT_BASE_CASED])
    def UpperCAmelCase ( self , tok_name) -> Tuple:
        '''simple docstring'''
        tokenizer = AutoTokenizer.from_pretrained(tok_name)
        tmp_dir = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir())
        max_len_source = max(len(tokenizer.encode(a)) for a in ARTICLES)
        max_len_target = max(len(tokenizer.encode(a)) for a in SUMMARIES)
        trunc_target = 4
        train_dataset = LegacySeqaSeqDataset(
            tokenizer , data_dir=tmp_dir , type_path='''train''' , max_source_length=20 , max_target_length=trunc_target , )
        dataloader = DataLoader(train_dataset , batch_size=2 , collate_fn=train_dataset.collate_fn)
        for batch in dataloader:
            assert batch["attention_mask"].shape == batch["input_ids"].shape
            # show that articles were trimmed.
            assert batch["input_ids"].shape[1] == max_len_source
            assert 20 >= batch["input_ids"].shape[1]  # trimmed significantly
            # show that targets were truncated
            assert batch["labels"].shape[1] == trunc_target  # Truncated
            assert max_len_target > trunc_target  # Truncated
            break  # No need to test every batch
    def UpperCAmelCase ( self) -> Dict:
        '''simple docstring'''
        tokenizer = AutoTokenizer.from_pretrained('''facebook/mbart-large-cc25''')
        tmp_dir = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()))
        orig_examples = tmp_dir.joinpath('''train.source''').open().readlines()
        save_dir = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()))
        pack_data_dir(tokenizer , tmp_dir , 1_28 , save_dir)
        orig_paths = {x.name for x in tmp_dir.iterdir()}
        new_paths = {x.name for x in save_dir.iterdir()}
        packed_examples = save_dir.joinpath('''train.source''').open().readlines()
        # orig: [' Sam ate lunch today.\n', 'Sams lunch ingredients.']
        # desired_packed: [' Sam ate lunch today.\n Sams lunch ingredients.']
        assert len(packed_examples) < len(orig_examples)
        assert len(packed_examples) == 1
        assert len(packed_examples[0]) == sum(len(x) for x in orig_examples)
        assert orig_paths == new_paths
@pytest.mark.skipif(not FAIRSEQ_AVAILABLE , reason='''This test requires fairseq''')
def UpperCAmelCase ( self) -> str:
'''simple docstring'''
if not FAIRSEQ_AVAILABLE:
return
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = self._get_dataset(max_len=64)
_UpperCamelCase = 64
_UpperCamelCase = ds.make_dynamic_sampler(__a , required_batch_size_multiple=__a)
_UpperCamelCase = [len(x) for x in batch_sampler]
assert len(set(__a)) > 1 # it's not dynamic batch size if every batch is the same length
assert sum(__a) == len(__a) # no dropped or added examples
_UpperCamelCase = DataLoader(__a , batch_sampler=__a , collate_fn=ds.collate_fn , num_workers=2)
_UpperCamelCase = []
_UpperCamelCase = []
for batch in data_loader:
_UpperCamelCase = batch['''input_ids'''].shape
_UpperCamelCase = src_shape[0]
assert bs % required_batch_size_multiple == 0 or bs < required_batch_size_multiple
_UpperCamelCase = np.product(batch['''input_ids'''].shape)
num_src_per_batch.append(num_src_tokens)
if num_src_tokens > (max_tokens * 1.1):
failures.append(num_src_tokens)
assert num_src_per_batch[0] == max(num_src_per_batch)
if failures:
raise AssertionError(F'''too many tokens in {len(__a)} batches''')
def UpperCAmelCase ( self) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = self._get_dataset(max_len=5_12)
_UpperCamelCase = 2
_UpperCamelCase = ds.make_sortish_sampler(__a , shuffle=__a)
_UpperCamelCase = DataLoader(__a , batch_size=__a , collate_fn=ds.collate_fn , num_workers=2)
_UpperCamelCase = DataLoader(__a , batch_size=__a , collate_fn=ds.collate_fn , num_workers=2 , sampler=__a)
_UpperCamelCase = tokenizer.pad_token_id
def count_pad_tokens(data_loader , k="input_ids"):
return [batch[k].eq(__a).sum().item() for batch in data_loader]
assert sum(count_pad_tokens(__a , k='''labels''')) < sum(count_pad_tokens(__a , k='''labels'''))
assert sum(count_pad_tokens(__a)) < sum(count_pad_tokens(__a))
assert len(__a) == len(__a)
def UpperCAmelCase ( self , __a=10_00 , __a=1_28) -> int:
'''simple docstring'''
if os.getenv('''USE_REAL_DATA''' , __a):
_UpperCamelCase = '''examples/seq2seq/wmt_en_ro'''
_UpperCamelCase = max_len * 2 * 64
if not Path(__a).joinpath('''train.len''').exists():
save_len_file(__a , __a)
else:
_UpperCamelCase = '''examples/seq2seq/test_data/wmt_en_ro'''
_UpperCamelCase = max_len * 4
save_len_file(__a , __a)
_UpperCamelCase = AutoTokenizer.from_pretrained(__a)
_UpperCamelCase = SeqaSeqDataset(
__a , data_dir=__a , type_path='''train''' , max_source_length=__a , max_target_length=__a , n_obs=__a , )
return ds, max_tokens, tokenizer
def UpperCAmelCase ( self) -> List[Any]:
'''simple docstring'''
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = self._get_dataset()
_UpperCamelCase = set(DistributedSortishSampler(__a , 2_56 , num_replicas=2 , rank=0 , add_extra_examples=__a))
_UpperCamelCase = set(DistributedSortishSampler(__a , 2_56 , num_replicas=2 , rank=1 , add_extra_examples=__a))
assert idsa.intersection(__a) == set()
@parameterized.expand(
[
MBART_TINY,
MARIAN_TINY,
T5_TINY,
BART_TINY,
PEGASUS_XSUM,
] , )
def UpperCAmelCase ( self , __a) -> Optional[int]:
'''simple docstring'''
_UpperCamelCase = AutoTokenizer.from_pretrained(__a , use_fast=__a)
if tok_name == MBART_TINY:
_UpperCamelCase = SeqaSeqDataset(
__a , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()) , type_path='''train''' , max_source_length=4 , max_target_length=8 , src_lang='''EN''' , tgt_lang='''FR''' , )
_UpperCamelCase = train_dataset.dataset_kwargs
assert "src_lang" in kwargs and "tgt_lang" in kwargs
else:
_UpperCamelCase = SeqaSeqDataset(
__a , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()) , type_path='''train''' , max_source_length=4 , max_target_length=8 , )
_UpperCamelCase = train_dataset.dataset_kwargs
assert "add_prefix_space" not in kwargs if tok_name != BART_TINY else "add_prefix_space" in kwargs
assert len(__a) == 1 if tok_name == BART_TINY else len(__a) == 0
| 19 |
"""simple docstring"""
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class _UpperCAmelCase( lowerCamelCase ):
lowercase__ = (DDPMScheduler,)
def UpperCAmelCase ( self , **__a) -> Union[str, Any]:
'''simple docstring'''
_UpperCamelCase = {
'''num_train_timesteps''': 10_00,
'''beta_start''': 0.0001,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
'''variance_type''': '''fixed_small''',
'''clip_sample''': True,
}
config.update(**__a)
return config
def UpperCAmelCase ( self) -> Dict:
'''simple docstring'''
for timesteps in [1, 5, 1_00, 10_00]:
self.check_over_configs(num_train_timesteps=__a)
def UpperCAmelCase ( self) -> int:
'''simple docstring'''
for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2]):
self.check_over_configs(beta_start=__a , beta_end=__a)
def UpperCAmelCase ( self) -> Tuple:
'''simple docstring'''
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=__a)
def UpperCAmelCase ( self) -> List[str]:
'''simple docstring'''
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=__a)
def UpperCAmelCase ( self) -> Dict:
'''simple docstring'''
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=__a)
def UpperCAmelCase ( self) -> Optional[int]:
'''simple docstring'''
self.check_over_configs(thresholding=__a)
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=__a , prediction_type=__a , sample_max_value=__a , )
def UpperCAmelCase ( self) -> Optional[int]:
'''simple docstring'''
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=__a)
def UpperCAmelCase ( self) -> Optional[Any]:
'''simple docstring'''
for t in [0, 5_00, 9_99]:
self.check_over_forward(time_step=__a)
def UpperCAmelCase ( self) -> Tuple:
'''simple docstring'''
_UpperCamelCase = self.scheduler_classes[0]
_UpperCamelCase = self.get_scheduler_config()
_UpperCamelCase = scheduler_class(**__a)
assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(4_87) - 0.0_0979)) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(9_99) - 0.02)) < 1e-5
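# For reference (an assumption from the DDPM formulation, not stated in this
# file): "fixed_small" is the posterior variance
#     beta_t * (1 - alpha_bar_{t-1}) / (1 - alpha_bar_t),
# which is defined as 0 at t = 0 and approaches beta_t (0.02 for this linear
# schedule) by t = 999, matching the three checks above.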
def UpperCAmelCase ( self) -> str:
'''simple docstring'''
_UpperCamelCase = self.scheduler_classes[0]
_UpperCamelCase = self.get_scheduler_config()
_UpperCamelCase = scheduler_class(**__a)
_UpperCamelCase = len(__a)
_UpperCamelCase = self.dummy_model()
_UpperCamelCase = self.dummy_sample_deter
_UpperCamelCase = torch.manual_seed(0)
for t in reversed(range(__a)):
# 1. predict noise residual
_UpperCamelCase = model(__a , __a)
# 2. predict previous mean of sample x_t-1
_UpperCamelCase = scheduler.step(__a , __a , __a , generator=__a).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
_UpperCamelCase = pred_prev_sample
_UpperCamelCase = torch.sum(torch.abs(__a))
_UpperCamelCase = torch.mean(torch.abs(__a))
assert abs(result_sum.item() - 258.9606) < 1e-2
assert abs(result_mean.item() - 0.3372) < 1e-3
def UpperCAmelCase ( self) -> str:
'''simple docstring'''
_UpperCamelCase = self.scheduler_classes[0]
_UpperCamelCase = self.get_scheduler_config(prediction_type='''v_prediction''')
_UpperCamelCase = scheduler_class(**__a)
_UpperCamelCase = len(__a)
_UpperCamelCase = self.dummy_model()
_UpperCamelCase = self.dummy_sample_deter
_UpperCamelCase = torch.manual_seed(0)
for t in reversed(range(__a)):
# 1. predict noise residual
_UpperCamelCase = model(__a , __a)
# 2. predict previous mean of sample x_t-1
_UpperCamelCase = scheduler.step(__a , __a , __a , generator=__a).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
_UpperCamelCase = pred_prev_sample
_UpperCamelCase = torch.sum(torch.abs(__a))
_UpperCamelCase = torch.mean(torch.abs(__a))
assert abs(result_sum.item() - 202.0296) < 1e-2
assert abs(result_mean.item() - 0.2631) < 1e-3
def UpperCAmelCase ( self) -> Any:
'''simple docstring'''
_UpperCamelCase = self.scheduler_classes[0]
_UpperCamelCase = self.get_scheduler_config()
_UpperCamelCase = scheduler_class(**__a)
_UpperCamelCase = [1_00, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=__a)
_UpperCamelCase = scheduler.timesteps
for i, timestep in enumerate(__a):
if i == len(__a) - 1:
_UpperCamelCase = -1
else:
_UpperCamelCase = timesteps[i + 1]
_UpperCamelCase = scheduler.previous_timestep(__a)
_UpperCamelCase = prev_t.item()
self.assertEqual(__a , __a)
def UpperCAmelCase ( self) -> Dict:
'''simple docstring'''
_UpperCamelCase = self.scheduler_classes[0]
_UpperCamelCase = self.get_scheduler_config()
_UpperCamelCase = scheduler_class(**__a)
_UpperCamelCase = [1_00, 87, 50, 51, 0]
with self.assertRaises(__a , msg='''`custom_timesteps` must be in descending order.'''):
scheduler.set_timesteps(timesteps=__a)
def UpperCAmelCase ( self) -> List[str]:
'''simple docstring'''
_UpperCamelCase = self.scheduler_classes[0]
_UpperCamelCase = self.get_scheduler_config()
_UpperCamelCase = scheduler_class(**__a)
_UpperCamelCase = [1_00, 87, 50, 1, 0]
_UpperCamelCase = len(__a)
with self.assertRaises(__a , msg='''Can only pass one of `num_inference_steps` or `custom_timesteps`.'''):
scheduler.set_timesteps(num_inference_steps=__a , timesteps=__a)
def UpperCAmelCase ( self) -> Tuple:
'''simple docstring'''
_UpperCamelCase = self.scheduler_classes[0]
_UpperCamelCase = self.get_scheduler_config()
_UpperCamelCase = scheduler_class(**__a)
_UpperCamelCase = [scheduler.config.num_train_timesteps]
with self.assertRaises(
__a , msg=F'''`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}''' , ):
scheduler.set_timesteps(timesteps=__a)
| 19 | 1 |
"""simple docstring"""
from typing import Optional
from torch import nn
from .transformer_ad import TransformeraDModel, TransformeraDModelOutput
class _UpperCAmelCase( nn.Module ):
def __init__( self , __a = 16 , __a = 88 , __a = None , __a = 1 , __a = 0.0 , __a = 32 , __a = None , __a = False , __a = None , __a = None , __a = "geglu" , __a = None , ) -> str:
'''simple docstring'''
super().__init__()
_UpperCamelCase = nn.ModuleList(
[
TransformeraDModel(
num_attention_heads=__a , attention_head_dim=__a , in_channels=__a , num_layers=__a , dropout=__a , norm_num_groups=__a , cross_attention_dim=__a , attention_bias=__a , sample_size=__a , num_vector_embeds=__a , activation_fn=__a , num_embeds_ada_norm=__a , )
for _ in range(2)
])
# Variables that can be set by a pipeline:
# The ratio of transformer1 to transformer2's output states to be combined during inference
_UpperCamelCase = 0.5
# The shape of `encoder_hidden_states` is expected to be
# `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
_UpperCamelCase = [77, 2_57]
# Which transformer to use to encode which condition.
# E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
_UpperCamelCase = [1, 0]
def UpperCAmelCase ( self , __a , __a , __a=None , __a=None , __a=None , __a = True , ) -> Dict:
'''simple docstring'''
_UpperCamelCase = hidden_states
_UpperCamelCase = []
_UpperCamelCase = 0
# attention_mask is not used yet
for i in range(2):
# for each of the two transformers, pass the corresponding condition tokens
_UpperCamelCase = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
_UpperCamelCase = self.transformer_index_for_condition[i]
_UpperCamelCase = self.transformers[transformer_index](
__a , encoder_hidden_states=__a , timestep=__a , cross_attention_kwargs=__a , return_dict=__a , )[0]
encoded_states.append(encoded_state - input_states)
tokens_start += self.condition_lengths[i]
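# interpolate the two conditioned residual streams, then re-add the input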
_UpperCamelCase = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
_UpperCamelCase = output_states + input_states
if not return_dict:
return (output_states,)
return TransformeraDModelOutput(sample=__a)
| 19 |
"""simple docstring"""
from __future__ import annotations
from functools import lru_cache
from math import ceil
_a = 100
_a = set(range(3, NUM_PRIMES, 2))
primes.add(2)
_a = 42
for prime in range(3, ceil(NUM_PRIMES**0.5), 2):
if prime not in primes:
continue
primes.difference_update(set(range(prime * prime, NUM_PRIMES, prime)))
@lru_cache(maxsize=1_00 )
def lowerCamelCase__ ( __snake_case ) -> set[int]:
"""simple docstring"""
if number_to_partition < 0:
return set()
elif number_to_partition == 0:
return {1}
_UpperCamelCase = set()
_UpperCamelCase = 42
_UpperCamelCase = 42
for prime in primes:
if prime > number_to_partition:
continue
for sub in partition(number_to_partition - prime ):
ret.add(sub * prime )
return ret
def lowerCamelCase__ ( __snake_case = 50_00 ) -> int | None:
"""simple docstring"""
for number_to_partition in range(1, __snake_case ):
if len(partition(number_to_partition ) ) > number_unique_partitions:
return number_to_partition
return None
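# A de-obfuscated sketch of the idea above (helper name is assumed, not from
# this file): prime factorisations are unique, so every multiset of primes
# summing to n yields a distinct product, and counting products counts prime
# partitions. The Project Euler 77 statement supplies a check: 10 has exactly
# five prime partitions.
def _prime_partition_count(n: int, _primes: tuple = (2, 3, 5, 7)) -> int:
    def products(m: int) -> set:
        if m == 0:
            return {1}
        out = set()
        for p in _primes:
            if p <= m:
                out.update(sub * p for sub in products(m - p))
        return out

    return len(products(n))


assert _prime_partition_count(10) == 5  # 2+2+2+2+2, 2+2+3+3, 2+3+5, 3+7, 5+5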
if __name__ == "__main__":
print(F"""{solution() = }""")
| 19 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_a = {
"""configuration_resnet""": ["""RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ResNetConfig""", """ResNetOnnxConfig"""]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a = [
"""RESNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ResNetForImageClassification""",
"""ResNetModel""",
"""ResNetPreTrainedModel""",
"""ResNetBackbone""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a = [
"""TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFResNetForImageClassification""",
"""TFResNetModel""",
"""TFResNetPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a = [
"""FlaxResNetForImageClassification""",
"""FlaxResNetModel""",
"""FlaxResNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_resnet import (
RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
ResNetBackbone,
ResNetForImageClassification,
ResNetModel,
ResNetPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_resnet import (
TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFResNetForImageClassification,
TFResNetModel,
TFResNetPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel
else:
import sys
_a = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
| 19 |
"""simple docstring"""
from collections.abc import Callable
import numpy as np
def lowerCamelCase__ ( __snake_case, __snake_case, __snake_case, __snake_case, __snake_case ) -> np.array:
"""simple docstring"""
_UpperCamelCase = int(np.ceil((x_end - xa) / step_size ) )
_UpperCamelCase = np.zeros((n + 1,) )
_UpperCamelCase = ya
_UpperCamelCase = xa
for k in range(__snake_case ):
_UpperCamelCase = y[k] + step_size * ode_func(x, y[k] )
_UpperCamelCase = y[k] + (
(step_size / 2) * (ode_func(x, y[k] ) + ode_func(x + step_size, __snake_case ))
)
x += step_size
return y
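# A self-contained check of the predictor-corrector step above (helper name
# is assumed): for y' = y with y(0) = 1 and h = 0.1, Heun's method gives
# 1 + (h / 2) * (1 + 1.1) = 1.105, close to exp(0.1) ~ 1.10517, where plain
# Euler would give 1.1.
def _heun_step(f, x, y, h):
    y_predict = y + h * f(x, y)  # explicit Euler predictor
    return y + (h / 2) * (f(x, y) + f(x + h, y_predict))  # trapezoidal corrector


assert abs(_heun_step(lambda x, y: y, 0.0, 1.0, 0.1) - 1.105) < 1e-9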
if __name__ == "__main__":
import doctest
doctest.testmod()
| 19 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_a = {
"""configuration_autoformer""": [
"""AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""AutoformerConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a = [
"""AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""AutoformerForPrediction""",
"""AutoformerModel""",
"""AutoformerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_autoformer import (
AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_autoformer import (
AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
AutoformerForPrediction,
AutoformerModel,
AutoformerPreTrainedModel,
)
else:
import sys
_a = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 19 |
"""simple docstring"""
import argparse
import torch
from transformers import BertForMaskedLM
if __name__ == "__main__":
_a = argparse.ArgumentParser(
description=(
"""Extraction some layers of the full BertForMaskedLM or RObertaForMaskedLM for Transfer Learned"""
""" Distillation"""
)
)
parser.add_argument("""--model_type""", default="""bert""", choices=["""bert"""])
parser.add_argument("""--model_name""", default="""bert-base-uncased""", type=str)
parser.add_argument("""--dump_checkpoint""", default="""serialization_dir/tf_bert-base-uncased_0247911.pth""", type=str)
parser.add_argument("""--vocab_transform""", action="""store_true""")
_a = parser.parse_args()
if args.model_type == "bert":
_a = BertForMaskedLM.from_pretrained(args.model_name)
_a = """bert"""
else:
raise ValueError("""args.model_type should be \"bert\".""")
_a = model.state_dict()
_a = {}
for w in ["word_embeddings", "position_embeddings"]:
_a = state_dict[F"""{prefix}.embeddings.{w}.weight"""]
for w in ["weight", "bias"]:
_a = state_dict[F"""{prefix}.embeddings.LayerNorm.{w}"""]
_a = 0
for teacher_idx in [0, 2, 4, 7, 9, 11]:
for w in ["weight", "bias"]:
_a = state_dict[
F"""{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}"""
]
_a = state_dict[
F"""{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}"""
]
_a = state_dict[
F"""{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}"""
]
_a = state_dict[
F"""{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}"""
]
_a = state_dict[
F"""{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}"""
]
_a = state_dict[
F"""{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}"""
]
_a = state_dict[
F"""{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}"""
]
_a = state_dict[
F"""{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}"""
]
std_idx += 1
_a = state_dict["""cls.predictions.decoder.weight"""]
_a = state_dict["""cls.predictions.bias"""]
if args.vocab_transform:
for w in ["weight", "bias"]:
_a = state_dict[F"""cls.predictions.transform.dense.{w}"""]
_a = state_dict[F"""cls.predictions.transform.LayerNorm.{w}"""]
print(F"""N layers selected for distillation: {std_idx}""")
print(F"""Number of params transferred for distillation: {len(compressed_sd.keys())}""")
print(F"""Save transferred checkpoint to {args.dump_checkpoint}.""")
torch.save(compressed_sd, args.dump_checkpoint)
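# Example invocation (script name and paths are illustrative placeholders):
#   python extract_for_distillation.py --model_type bert --model_name bert-base-uncased \
#       --dump_checkpoint serialization_dir/bert_extracted.pth --vocab_transform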
| 19 | 1 |
"""simple docstring"""
import json
import os
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ImageGPTImageProcessor
class _UpperCAmelCase( unittest.TestCase ):
def __init__( self , __a , __a=7 , __a=3 , __a=18 , __a=30 , __a=4_00 , __a=True , __a=None , __a=True , ) -> Any:
'''simple docstring'''
_UpperCamelCase = size if size is not None else {'''height''': 18, '''width''': 18}
_UpperCamelCase = parent
_UpperCamelCase = batch_size
_UpperCamelCase = num_channels
_UpperCamelCase = image_size
_UpperCamelCase = min_resolution
_UpperCamelCase = max_resolution
_UpperCamelCase = do_resize
_UpperCamelCase = size
_UpperCamelCase = do_normalize
def UpperCAmelCase ( self) -> int:
'''simple docstring'''
return {
# here we create 2 clusters for the sake of simplicity
"clusters": np.asarray(
[
[0.8866_4436_3403_3203, 0.6618_8293_6954_4983, 0.3891_7464_0178_6804],
[-0.6042_5591_4688_1104, -0.0_2295_0088_6052_8469, 0.5423_7973_6900_3296],
]),
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
}
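# (Real ImageGPT checkpoints ship 512 RGB clusters; two are enough to
# exercise the colour-quantisation path in these tests.)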
@require_torch
@require_vision
class _UpperCAmelCase( lowerCamelCase , unittest.TestCase ):
lowercase__ = ImageGPTImageProcessor if is_vision_available() else None
def UpperCAmelCase ( self) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase = ImageGPTImageProcessingTester(self)
@property
def UpperCAmelCase ( self) -> Dict:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCAmelCase ( self) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase = self.image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(__a , '''clusters'''))
self.assertTrue(hasattr(__a , '''do_resize'''))
self.assertTrue(hasattr(__a , '''size'''))
self.assertTrue(hasattr(__a , '''do_normalize'''))
def UpperCAmelCase ( self) -> Dict:
'''simple docstring'''
_UpperCamelCase = self.image_processing_class.from_dict(self.image_processor_dict)
self.assertEqual(image_processor.size , {'''height''': 18, '''width''': 18})
_UpperCamelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=42)
self.assertEqual(image_processor.size , {'''height''': 42, '''width''': 42})
def UpperCAmelCase ( self) -> List[Any]:
'''simple docstring'''
_UpperCamelCase = self.image_processing_class(**self.image_processor_dict)
_UpperCamelCase = json.loads(image_processor.to_json_string())
for key, value in self.image_processor_dict.items():
if key == "clusters":
self.assertTrue(np.array_equal(__a , obj[key]))
else:
self.assertEqual(obj[key] , __a)
def UpperCAmelCase ( self) -> List[Any]:
'''simple docstring'''
_UpperCamelCase = self.image_processing_class(**self.image_processor_dict)
with tempfile.TemporaryDirectory() as tmpdirname:
_UpperCamelCase = os.path.join(__a , '''image_processor.json''')
image_processor_first.to_json_file(__a)
_UpperCamelCase = self.image_processing_class.from_json_file(__a).to_dict()
_UpperCamelCase = image_processor_first.to_dict()
for key, value in image_processor_first.items():
if key == "clusters":
self.assertTrue(np.array_equal(__a , image_processor_second[key]))
else:
self.assertEqual(image_processor_first[key] , __a)
def UpperCAmelCase ( self) -> Tuple:
'''simple docstring'''
_UpperCamelCase = self.image_processing_class(**self.image_processor_dict)
with tempfile.TemporaryDirectory() as tmpdirname:
image_processor_first.save_pretrained(__a)
_UpperCamelCase = self.image_processing_class.from_pretrained(__a).to_dict()
_UpperCamelCase = image_processor_first.to_dict()
for key, value in image_processor_first.items():
if key == "clusters":
self.assertTrue(np.array_equal(__a , image_processor_second[key]))
else:
self.assertEqual(image_processor_first[key] , __a)
@unittest.skip('''ImageGPT requires clusters at initialization''')
def UpperCAmelCase ( self) -> List[Any]:
'''simple docstring'''
pass
def lowerCamelCase__ ( ) -> Any:
"""simple docstring"""
_UpperCamelCase = load_dataset('''hf-internal-testing/fixtures_image_utils''', split='''test''' )
_UpperCamelCase = Image.open(dataset[4]['''file'''] )
_UpperCamelCase = Image.open(dataset[5]['''file'''] )
_UpperCamelCase = [imagea, imagea]
return images
@require_vision
@require_torch
class _UpperCAmelCase( unittest.TestCase ):
@slow
def UpperCAmelCase ( self) -> Tuple:
'''simple docstring'''
_UpperCamelCase = ImageGPTImageProcessor.from_pretrained('''openai/imagegpt-small''')
_UpperCamelCase = prepare_images()
# test non-batched
_UpperCamelCase = image_processing(images[0] , return_tensors='''pt''')
self.assertIsInstance(encoding.input_ids , torch.LongTensor)
self.assertEqual(encoding.input_ids.shape , (1, 10_24))
_UpperCamelCase = [3_06, 1_91, 1_91]
self.assertEqual(encoding.input_ids[0, :3].tolist() , __a)
# test batched
_UpperCamelCase = image_processing(__a , return_tensors='''pt''')
self.assertIsInstance(encoding.input_ids , torch.LongTensor)
self.assertEqual(encoding.input_ids.shape , (2, 10_24))
_UpperCamelCase = [3_03, 13, 13]
self.assertEqual(encoding.input_ids[1, -3:].tolist() , __a)
| 19 |
"""simple docstring"""
import unittest
from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
_a = """platform"""
import jax
import jax.numpy as jnp
import numpy as np
from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel
@require_flax
class _UpperCAmelCase:
lowercase__ = PegasusConfig
lowercase__ = {}
lowercase__ = 'gelu'
def __init__( self , __a , __a=13 , __a=7 , __a=True , __a=False , __a=99 , __a=32 , __a=5 , __a=4 , __a=37 , __a=0.1 , __a=0.1 , __a=20 , __a=2 , __a=1 , __a=0 , ) -> int:
'''simple docstring'''
_UpperCamelCase = parent
_UpperCamelCase = batch_size
_UpperCamelCase = seq_length
_UpperCamelCase = is_training
_UpperCamelCase = use_labels
_UpperCamelCase = vocab_size
_UpperCamelCase = hidden_size
_UpperCamelCase = num_hidden_layers
_UpperCamelCase = num_attention_heads
_UpperCamelCase = intermediate_size
_UpperCamelCase = hidden_dropout_prob
_UpperCamelCase = attention_probs_dropout_prob
_UpperCamelCase = max_position_embeddings
_UpperCamelCase = eos_token_id
_UpperCamelCase = pad_token_id
_UpperCamelCase = bos_token_id
def UpperCAmelCase ( self) -> int:
'''simple docstring'''
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size).clip(3 , self.vocab_size)
_UpperCamelCase = np.expand_dims(np.array([self.eos_token_id] * self.batch_size) , 1)
_UpperCamelCase = np.concatenate([input_ids, eos_tensor] , axis=1)
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
_UpperCamelCase = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
_UpperCamelCase = prepare_pegasus_inputs_dict(__a , __a , __a)
return config, inputs_dict
def UpperCAmelCase ( self , __a , __a , __a) -> Tuple:
'''simple docstring'''
_UpperCamelCase = 20
_UpperCamelCase = model_class_name(__a)
_UpperCamelCase = model.encode(inputs_dict['''input_ids'''])
_UpperCamelCase , _UpperCamelCase = (
inputs_dict['''decoder_input_ids'''],
inputs_dict['''decoder_attention_mask'''],
)
_UpperCamelCase = model.init_cache(decoder_input_ids.shape[0] , __a , __a)
_UpperCamelCase = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype='''i4''')
_UpperCamelCase = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
_UpperCamelCase = model.decode(
decoder_input_ids[:, :-1] , __a , decoder_attention_mask=__a , past_key_values=__a , decoder_position_ids=__a , )
_UpperCamelCase = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''')
_UpperCamelCase = model.decode(
decoder_input_ids[:, -1:] , __a , decoder_attention_mask=__a , past_key_values=outputs_cache.past_key_values , decoder_position_ids=__a , )
_UpperCamelCase = model.decode(__a , __a)
_UpperCamelCase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
self.parent.assertTrue(diff < 1e-3 , msg=F'''Max diff is {diff}''')
def UpperCAmelCase ( self , __a , __a , __a) -> Tuple:
'''simple docstring'''
_UpperCamelCase = 20
_UpperCamelCase = model_class_name(__a)
_UpperCamelCase = model.encode(inputs_dict['''input_ids'''])
_UpperCamelCase , _UpperCamelCase = (
inputs_dict['''decoder_input_ids'''],
inputs_dict['''decoder_attention_mask'''],
)
_UpperCamelCase = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
] , axis=-1 , )
_UpperCamelCase = model.init_cache(decoder_input_ids.shape[0] , __a , __a)
_UpperCamelCase = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
_UpperCamelCase = model.decode(
decoder_input_ids[:, :-1] , __a , decoder_attention_mask=__a , past_key_values=__a , decoder_position_ids=__a , )
_UpperCamelCase = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''')
_UpperCamelCase = model.decode(
decoder_input_ids[:, -1:] , __a , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=__a , decoder_position_ids=__a , )
_UpperCamelCase = model.decode(__a , __a , decoder_attention_mask=__a)
_UpperCamelCase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
self.parent.assertTrue(diff < 1e-3 , msg=F'''Max diff is {diff}''')
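# Both cache checks above verify that incremental decoding with
# past_key_values reproduces a single full decode to within 1e-3.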
def lowerCamelCase__ ( __snake_case, __snake_case, __snake_case, __snake_case=None, __snake_case=None, ) -> Union[str, Any]:
"""simple docstring"""
if attention_mask is None:
_UpperCamelCase = np.not_equal(__snake_case, config.pad_token_id ).astype(np.inta )
if decoder_attention_mask is None:
_UpperCamelCase = np.concatenate(
[
np.ones(decoder_input_ids[:, :1].shape, dtype=np.inta ),
np.not_equal(decoder_input_ids[:, 1:], config.pad_token_id ).astype(np.inta ),
], axis=-1, )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
}
@require_flax
class _UpperCAmelCase( lowerCamelCase , unittest.TestCase ):
lowercase__ = (
(
FlaxPegasusForConditionalGeneration,
FlaxPegasusModel,
)
if is_flax_available()
else ()
)
lowercase__ = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
lowercase__ = True
lowercase__ = False
lowercase__ = False
lowercase__ = False
def UpperCAmelCase ( self) -> Any:
'''simple docstring'''
_UpperCamelCase = FlaxPegasusModelTester(self)
_UpperCamelCase = ConfigTester(self , config_class=__a)
def UpperCAmelCase ( self) -> Union[str, Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
def UpperCAmelCase ( self) -> Tuple:
'''simple docstring'''
_UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(__a , __a , __a)
def UpperCAmelCase ( self) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(__a , __a , __a)
def UpperCAmelCase ( self) -> Optional[int]:
'''simple docstring'''
_UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__):
_UpperCamelCase = self._prepare_for_class(__a , __a)
_UpperCamelCase = model_class(__a)
@jax.jit
def encode_jitted(__a , __a=None , **__a):
return model.encode(input_ids=__a , attention_mask=__a)
with self.subTest('''JIT Enabled'''):
_UpperCamelCase = encode_jitted(**__a).to_tuple()
with self.subTest('''JIT Disabled'''):
with jax.disable_jit():
_UpperCamelCase = encode_jitted(**__a).to_tuple()
self.assertEqual(len(__a) , len(__a))
for jitted_output, output in zip(__a , __a):
self.assertEqual(jitted_output.shape , output.shape)
def UpperCAmelCase ( self) -> str:
'''simple docstring'''
_UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__):
_UpperCamelCase = model_class(__a)
_UpperCamelCase = model.encode(inputs_dict['''input_ids'''] , inputs_dict['''attention_mask'''])
_UpperCamelCase = {
'''decoder_input_ids''': inputs_dict['''decoder_input_ids'''],
'''decoder_attention_mask''': inputs_dict['''decoder_attention_mask'''],
'''encoder_outputs''': encoder_outputs,
}
@jax.jit
def decode_jitted(__a , __a , __a):
return model.decode(
decoder_input_ids=__a , decoder_attention_mask=__a , encoder_outputs=__a , )
with self.subTest('''JIT Enabled'''):
_UpperCamelCase = decode_jitted(**__a).to_tuple()
with self.subTest('''JIT Disabled'''):
with jax.disable_jit():
_UpperCamelCase = decode_jitted(**__a).to_tuple()
self.assertEqual(len(__a) , len(__a))
for jitted_output, output in zip(__a , __a):
self.assertEqual(jitted_output.shape , output.shape)
@slow
def UpperCAmelCase ( self) -> int:
'''simple docstring'''
for model_class_name in self.all_model_classes:
_UpperCamelCase = model_class_name.from_pretrained('''google/pegasus-large''' , from_pt=__a)
_UpperCamelCase = np.ones((1, 1))
_UpperCamelCase = model(__a)
self.assertIsNotNone(__a)
@slow
def UpperCAmelCase ( self) -> Optional[int]:
'''simple docstring'''
_UpperCamelCase = FlaxPegasusForConditionalGeneration.from_pretrained('''google/pegasus-xsum''')
_UpperCamelCase = PegasusTokenizer.from_pretrained('''google/pegasus-xsum''')
_UpperCamelCase = [
''' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.''',
''' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ''',
]
_UpperCamelCase = [
'''California\'s largest electricity provider has turned off power to hundreds of thousands of customers.''',
'''Pop group N-Dubz have revealed they were surprised to get four nominations for this year\'s Mobo Awards.''',
]
_UpperCamelCase = tokenizer(__a , return_tensors='''np''' , truncation=__a , max_length=5_12 , padding=__a)
_UpperCamelCase = model.generate(**__a , num_beams=2).sequences
_UpperCamelCase = tokenizer.batch_decode(__a , skip_special_tokens=__a)
assert tgt_text == decoded
| 19 | 1 |
"""simple docstring"""
def lowerCamelCase__ ( __snake_case ) -> int:
"""simple docstring"""
if divisor % 5 == 0 or divisor % 2 == 0:
return 0
_UpperCamelCase = 1
_UpperCamelCase = 1
while repunit:
_UpperCamelCase = (10 * repunit + 1) % divisor
repunit_index += 1
return repunit_index
def lowerCamelCase__ ( __snake_case = 1_00_00_00 ) -> int:
"""simple docstring"""
_UpperCamelCase = limit - 1
if divisor % 2 == 0:
divisor += 1
while least_divisible_repunit(divisor ) <= limit:
divisor += 2
return divisor
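# A self-contained spot check (helper name is assumed) mirroring the Project
# Euler 129 statement: A(7) = 6 because R(6) = 111111 = 7 * 15873 is the first
# repunit divisible by 7.
def _repunit_length(divisor: int) -> int:
    repunit, index = 1 % divisor, 1
    while repunit:
        repunit = (10 * repunit + 1) % divisor
        index += 1
    return index


assert _repunit_length(7) == 6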
if __name__ == "__main__":
print(F"""{solution() = }""")
| 19 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers import (
TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
BertConfig,
DPRConfig,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
class _UpperCAmelCase:
def __init__( self , __a , __a=13 , __a=7 , __a=True , __a=True , __a=True , __a=True , __a=99 , __a=32 , __a=2 , __a=4 , __a=37 , __a="gelu" , __a=0.1 , __a=0.1 , __a=5_12 , __a=16 , __a=2 , __a=0.02 , __a=3 , __a=4 , __a=None , __a=0 , ) -> Any:
'''simple docstring'''
_UpperCamelCase = parent
_UpperCamelCase = batch_size
_UpperCamelCase = seq_length
_UpperCamelCase = is_training
_UpperCamelCase = use_input_mask
_UpperCamelCase = use_token_type_ids
_UpperCamelCase = use_labels
_UpperCamelCase = vocab_size
_UpperCamelCase = hidden_size
_UpperCamelCase = num_hidden_layers
_UpperCamelCase = num_attention_heads
_UpperCamelCase = intermediate_size
_UpperCamelCase = hidden_act
_UpperCamelCase = hidden_dropout_prob
_UpperCamelCase = attention_probs_dropout_prob
_UpperCamelCase = max_position_embeddings
_UpperCamelCase = type_vocab_size
_UpperCamelCase = type_sequence_label_size
_UpperCamelCase = initializer_range
_UpperCamelCase = num_labels
_UpperCamelCase = num_choices
_UpperCamelCase = scope
_UpperCamelCase = projection_dim
def UpperCAmelCase ( self) -> List[Any]:
'''simple docstring'''
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
_UpperCamelCase = None
if self.use_input_mask:
# follow test_modeling_tf_ctrl.py
_UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length])
_UpperCamelCase = None
if self.use_token_type_ids:
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
_UpperCamelCase = None
_UpperCamelCase = None
_UpperCamelCase = None
if self.use_labels:
_UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size)
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
_UpperCamelCase = ids_tensor([self.batch_size] , self.num_choices)
_UpperCamelCase = BertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__a , initializer_range=self.initializer_range , )
_UpperCamelCase = DPRConfig(projection_dim=self.projection_dim , **config.to_dict())
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a) -> Optional[int]:
'''simple docstring'''
_UpperCamelCase = TFDPRContextEncoder(config=__a)
_UpperCamelCase = model(__a , attention_mask=__a , token_type_ids=__a)
_UpperCamelCase = model(__a , token_type_ids=__a)
_UpperCamelCase = model(__a)
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.projection_dim or self.hidden_size))
def UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a) -> Union[str, Any]:
'''simple docstring'''
_UpperCamelCase = TFDPRQuestionEncoder(config=__a)
_UpperCamelCase = model(__a , attention_mask=__a , token_type_ids=__a)
_UpperCamelCase = model(__a , token_type_ids=__a)
_UpperCamelCase = model(__a)
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.projection_dim or self.hidden_size))
def UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a) -> Dict:
'''simple docstring'''
_UpperCamelCase = TFDPRReader(config=__a)
_UpperCamelCase = model(__a , attention_mask=__a)
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.relevance_logits.shape , (self.batch_size,))
def UpperCAmelCase ( self) -> Any:
'''simple docstring'''
_UpperCamelCase = self.prepare_config_and_inputs()
(
_UpperCamelCase,
_UpperCamelCase,
_UpperCamelCase,
_UpperCamelCase,
_UpperCamelCase,
_UpperCamelCase,
_UpperCamelCase,
) = config_and_inputs
_UpperCamelCase = {'''input_ids''': input_ids}
return config, inputs_dict
@require_tf
class _UpperCAmelCase( lowerCamelCase , lowerCamelCase , unittest.TestCase ):
lowercase__ = (
(
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
if is_tf_available()
else ()
)
lowercase__ = {'feature-extraction': TFDPRQuestionEncoder} if is_tf_available() else {}
lowercase__ = False
lowercase__ = False
lowercase__ = False
lowercase__ = False
lowercase__ = False
def UpperCAmelCase ( self) -> Tuple:
'''simple docstring'''
_UpperCamelCase = TFDPRModelTester(self)
_UpperCamelCase = ConfigTester(self , config_class=__a , hidden_size=37)
def UpperCAmelCase ( self) -> Optional[int]:
'''simple docstring'''
self.config_tester.run_common_tests()
def UpperCAmelCase ( self) -> Tuple:
'''simple docstring'''
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_context_encoder(*__a)
def UpperCAmelCase ( self) -> Union[str, Any]:
'''simple docstring'''
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_question_encoder(*__a)
def UpperCAmelCase ( self) -> Dict:
'''simple docstring'''
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_reader(*__a)
@slow
def UpperCAmelCase ( self) -> str:
'''simple docstring'''
for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCamelCase = TFDPRContextEncoder.from_pretrained(__a)
self.assertIsNotNone(__a)
for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCamelCase = TFDPRContextEncoder.from_pretrained(__a)
self.assertIsNotNone(__a)
for model_name in TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCamelCase = TFDPRQuestionEncoder.from_pretrained(__a)
self.assertIsNotNone(__a)
for model_name in TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCamelCase = TFDPRReader.from_pretrained(__a)
self.assertIsNotNone(__a)
@require_tf
class _UpperCAmelCase( unittest.TestCase ):
@slow
def UpperCAmelCase ( self) -> Tuple:
'''simple docstring'''
_UpperCamelCase = TFDPRQuestionEncoder.from_pretrained('''facebook/dpr-question_encoder-single-nq-base''')
_UpperCamelCase = tf.constant(
[[1_01, 75_92, 10_10, 20_03, 20_26, 38_99, 1_01_40, 10_29, 1_02]]) # [CLS] hello, is my dog cute? [SEP]
_UpperCamelCase = model(__a)[0] # embedding shape = (1, 768)
# compare the actual values for a slice.
_UpperCamelCase = tf.constant(
[
[
0.0323_6253,
0.1275_3335,
0.1681_8509,
0.0027_9786,
0.389_6933,
0.2426_4945,
0.217_8971,
-0.0233_5227,
-0.0848_1959,
-0.1432_4117,
]
])
self.assertTrue(numpy.allclose(output[:, :10].numpy() , expected_slice.numpy() , atol=1e-4))
| 19 | 1 |
"""simple docstring"""
from math import pi, sqrt
def lowerCamelCase__ ( __snake_case ) -> float:
"""simple docstring"""
if num <= 0:
raise ValueError('''math domain error''' )
if num > 171.5:
raise OverflowError('''math range error''' )
elif num - int(num ) not in (0, 0.5):
raise NotImplementedError('''num must be an integer or a half-integer''' )
elif num == 0.5:
return sqrt(pi )
else:
return 1.0 if num == 1 else (num - 1) * gamma(num - 1 )
def lowerCamelCase__ ( ) -> None:
"""simple docstring"""
assert gamma(0.5 ) == sqrt(pi )
assert gamma(1 ) == 1.0
assert gamma(2 ) == 1.0
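# Half-integer spot check (added as an assumption from the recurrence
# gamma(x) = (x - 1) * gamma(x - 1) together with gamma(0.5) = sqrt(pi)):
assert abs(gamma(3.5 ) - 2.5 * 1.5 * 0.5 * sqrt(pi )) < 1e-12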
if __name__ == "__main__":
from doctest import testmod
testmod()
_a = 1.0
while num:
_a = float(input("""Gamma of: """))
print(F"""gamma({num}) = {gamma(num)}""")
print("""\nEnter 0 to exit...""")
| 19 |
"""simple docstring"""
import argparse
import json
from typing import List
from ltp import LTP
from transformers.models.bert.tokenization_bert import BertTokenizer
def lowerCamelCase__ ( __snake_case ) -> Optional[Any]:
"""simple docstring"""
if (
(cp >= 0x4E00 and cp <= 0x9FFF)  # CJK Unified Ideographs
or (cp >= 0x3400 and cp <= 0x4DBF)  # CJK Unified Ideographs Extension A
or (cp >= 0x2_0000 and cp <= 0x2_A6DF)  # CJK Unified Ideographs Extension B
or (cp >= 0x2_A700 and cp <= 0x2_B73F)  # CJK Unified Ideographs Extension C
or (cp >= 0x2_B740 and cp <= 0x2_B81F)  # CJK Unified Ideographs Extension D
or (cp >= 0x2_B820 and cp <= 0x2_CEAF)  # CJK Unified Ideographs Extension E
or (cp >= 0xF900 and cp <= 0xFAFF)  # CJK Compatibility Ideographs
or (cp >= 0x2_F800 and cp <= 0x2_FA1F)  # CJK Compatibility Ideographs Supplement
):
return True
return False
def lowerCamelCase__ ( __snake_case ) -> Optional[Any]:
"""simple docstring"""
for char in word:
_UpperCamelCase = ord(char )
if not _is_chinese_char(__snake_case ):
return 0
return 1
def lowerCamelCase__ ( __snake_case ) -> List[Any]:
"""simple docstring"""
_UpperCamelCase = set()
for token in tokens:
_UpperCamelCase = len(token ) > 1 and is_chinese(token )
if chinese_word:
word_set.add(token )
_UpperCamelCase = list(word_set )
return word_list
def lowerCamelCase__ ( __snake_case, __snake_case ) -> int:
"""simple docstring"""
if not chinese_word_set:
return bert_tokens
_UpperCamelCase = max([len(w ) for w in chinese_word_set] )
_UpperCamelCase = bert_tokens
_UpperCamelCase , _UpperCamelCase = 0, len(__snake_case )
while start < end:
_UpperCamelCase = True
if is_chinese(bert_word[start] ):
_UpperCamelCase = min(end - start, __snake_case )
for i in range(__snake_case, 1, -1 ):
_UpperCamelCase = ''''''.join(bert_word[start : start + i] )
if whole_word in chinese_word_set:
for j in range(start + 1, start + i ):
_UpperCamelCase = '''##''' + bert_word[j]
_UpperCamelCase = start + i
_UpperCamelCase = False
break
if single_word:
start += 1
return bert_word
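# Worked example (tokens assumed for illustration): with chinese_word_set
# containing the two-character word "朋友", the BERT tokens
# ["我", "的", "朋", "友"] are relabelled to ["我", "的", "朋", "##友"], so
# whole-word masking can treat the two characters as one unit.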
def lowerCamelCase__ ( __snake_case, __snake_case, __snake_case ) -> List[Any]:
"""simple docstring"""
_UpperCamelCase = []
for i in range(0, len(__snake_case ), 1_00 ):
_UpperCamelCase = ltp_tokenizer.pipeline(lines[i : i + 1_00], tasks=['''cws'''] ).cws
_UpperCamelCase = [get_chinese_word(r ) for r in res]
ltp_res.extend(__snake_case )
assert len(__snake_case ) == len(__snake_case )
_UpperCamelCase = []
for i in range(0, len(__snake_case ), 1_00 ):
_UpperCamelCase = bert_tokenizer(lines[i : i + 1_00], add_special_tokens=__snake_case, truncation=__snake_case, max_length=5_12 )
bert_res.extend(res['''input_ids'''] )
assert len(__snake_case ) == len(__snake_case )
_UpperCamelCase = []
for input_ids, chinese_word in zip(__snake_case, __snake_case ):
_UpperCamelCase = []
for id in input_ids:
_UpperCamelCase = bert_tokenizer._convert_id_to_token(id )
input_tokens.append(__snake_case )
_UpperCamelCase = add_sub_symbol(__snake_case, __snake_case )
_UpperCamelCase = []
# We only save positions of Chinese subwords that start with ##, which marks them as part of a whole word.
for i, token in enumerate(__snake_case ):
if token[:2] == "##":
_UpperCamelCase = token[2:]
# save chinese tokens' pos
if len(__snake_case ) == 1 and _is_chinese_char(ord(__snake_case ) ):
ref_id.append(__snake_case )
ref_ids.append(__snake_case )
assert len(__snake_case ) == len(__snake_case )
return ref_ids
def lowerCamelCase__ ( __snake_case ) -> Optional[int]:
"""simple docstring"""
with open(args.file_name, '''r''', encoding='''utf-8''' ) as f:
_UpperCamelCase = f.readlines()
_UpperCamelCase = [line.strip() for line in data if len(__snake_case ) > 0 and not line.isspace()] # avoid delimiter like '\u2029'
_UpperCamelCase = LTP(args.ltp ) # faster in GPU device
_UpperCamelCase = BertTokenizer.from_pretrained(args.bert )
_UpperCamelCase = prepare_ref(__snake_case, __snake_case, __snake_case )
with open(args.save_path, '''w''', encoding='''utf-8''' ) as f:
_UpperCamelCase = [json.dumps(__snake_case ) + '''\n''' for ref in ref_ids]
f.writelines(__snake_case )
if __name__ == "__main__":
_a = argparse.ArgumentParser(description="""prepare_chinese_ref""")
parser.add_argument(
"""--file_name""",
required=False,
type=str,
default="""./resources/chinese-demo.txt""",
help="""file need process, same as training data in lm""",
)
parser.add_argument(
"""--ltp""",
required=False,
type=str,
default="""./resources/ltp""",
help="""resources for LTP tokenizer, usually a path""",
)
parser.add_argument(
"""--bert""",
required=False,
type=str,
default="""./resources/robert""",
help="""resources for Bert tokenizer""",
)
parser.add_argument(
"""--save_path""",
required=False,
type=str,
default="""./resources/ref.txt""",
help="""path to save res""",
)
_a = parser.parse_args()
main(args)
| 19 | 1 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_realm import RealmTokenizer
_a = logging.get_logger(__name__)
_a = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
_a = {
"""vocab_file""": {
"""google/realm-cc-news-pretrained-embedder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/vocab.txt"""
),
"""google/realm-cc-news-pretrained-encoder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/vocab.txt"""
),
"""google/realm-cc-news-pretrained-scorer""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/vocab.txt"""
),
"""google/realm-cc-news-pretrained-openqa""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/vocab.txt"""
),
"""google/realm-orqa-nq-openqa""": """https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/vocab.txt""",
"""google/realm-orqa-nq-reader""": """https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/vocab.txt""",
"""google/realm-orqa-wq-openqa""": """https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/vocab.txt""",
"""google/realm-orqa-wq-reader""": """https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/vocab.txt""",
},
"""tokenizer_file""": {
"""google/realm-cc-news-pretrained-embedder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/tokenizer.jsont"""
),
"""google/realm-cc-news-pretrained-encoder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/tokenizer.json"""
),
"""google/realm-cc-news-pretrained-scorer""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/tokenizer.json"""
),
"""google/realm-cc-news-pretrained-openqa""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/tokenizer.json"""
),
"""google/realm-orqa-nq-openqa""": (
"""https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/tokenizer.json"""
),
"""google/realm-orqa-nq-reader""": (
"""https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/tokenizer.json"""
),
"""google/realm-orqa-wq-openqa""": (
"""https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/tokenizer.json"""
),
"""google/realm-orqa-wq-reader""": (
"""https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/tokenizer.json"""
),
},
}
_a = {
"""google/realm-cc-news-pretrained-embedder""": 512,
"""google/realm-cc-news-pretrained-encoder""": 512,
"""google/realm-cc-news-pretrained-scorer""": 512,
"""google/realm-cc-news-pretrained-openqa""": 512,
"""google/realm-orqa-nq-openqa""": 512,
"""google/realm-orqa-nq-reader""": 512,
"""google/realm-orqa-wq-openqa""": 512,
"""google/realm-orqa-wq-reader""": 512,
}
_a = {
"""google/realm-cc-news-pretrained-embedder""": {"""do_lower_case""": True},
"""google/realm-cc-news-pretrained-encoder""": {"""do_lower_case""": True},
"""google/realm-cc-news-pretrained-scorer""": {"""do_lower_case""": True},
"""google/realm-cc-news-pretrained-openqa""": {"""do_lower_case""": True},
"""google/realm-orqa-nq-openqa""": {"""do_lower_case""": True},
"""google/realm-orqa-nq-reader""": {"""do_lower_case""": True},
"""google/realm-orqa-wq-openqa""": {"""do_lower_case""": True},
"""google/realm-orqa-wq-reader""": {"""do_lower_case""": True},
}
class _UpperCAmelCase( lowerCamelCase ):
lowercase__ = VOCAB_FILES_NAMES
lowercase__ = PRETRAINED_VOCAB_FILES_MAP
lowercase__ = PRETRAINED_INIT_CONFIGURATION
lowercase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase__ = RealmTokenizer
def __init__( self , __a=None , __a=None , __a=True , __a="[UNK]" , __a="[SEP]" , __a="[PAD]" , __a="[CLS]" , __a="[MASK]" , __a=True , __a=None , **__a , ) -> str:
'''simple docstring'''
super().__init__(
__a , tokenizer_file=__a , do_lower_case=__a , unk_token=__a , sep_token=__a , pad_token=__a , cls_token=__a , mask_token=__a , tokenize_chinese_chars=__a , strip_accents=__a , **__a , )
_UpperCamelCase = json.loads(self.backend_tokenizer.normalizer.__getstate__())
if (
normalizer_state.get('''lowercase''' , __a) != do_lower_case
or normalizer_state.get('''strip_accents''' , __a) != strip_accents
or normalizer_state.get('''handle_chinese_chars''' , __a) != tokenize_chinese_chars
):
_UpperCamelCase = getattr(__a , normalizer_state.pop('''type'''))
_UpperCamelCase = do_lower_case
_UpperCamelCase = strip_accents
_UpperCamelCase = tokenize_chinese_chars
_UpperCamelCase = normalizer_class(**__a)
_UpperCamelCase = do_lower_case
def UpperCAmelCase ( self , __a , **__a) -> List[str]:
'''simple docstring'''
_UpperCamelCase = PaddingStrategy.MAX_LENGTH
_UpperCamelCase = text
_UpperCamelCase = kwargs.pop('''text_pair''' , __a)
_UpperCamelCase = kwargs.pop('''return_tensors''' , __a)
_UpperCamelCase = {
'''input_ids''': [],
'''attention_mask''': [],
'''token_type_ids''': [],
}
for idx, candidate_text in enumerate(__a):
if batch_text_pair is not None:
_UpperCamelCase = batch_text_pair[idx]
else:
_UpperCamelCase = None
_UpperCamelCase = super().__call__(__a , __a , return_tensors=__a , **__a)
_UpperCamelCase = encoded_candidates.get('''input_ids''')
_UpperCamelCase = encoded_candidates.get('''attention_mask''')
_UpperCamelCase = encoded_candidates.get('''token_type_ids''')
if encoded_input_ids is not None:
output_data["input_ids"].append(__a)
if encoded_attention_mask is not None:
output_data["attention_mask"].append(__a)
if encoded_token_type_ids is not None:
output_data["token_type_ids"].append(__a)
_UpperCamelCase = {key: item for key, item in output_data.items() if len(__a) != 0}
return BatchEncoding(__a , tensor_type=__a)
def UpperCAmelCase ( self , __a , __a=None) -> Union[str, Any]:
'''simple docstring'''
_UpperCamelCase = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def UpperCAmelCase ( self , __a , __a = None) -> List[int]:
'''simple docstring'''
_UpperCamelCase = [self.sep_token_id]
_UpperCamelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1]
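# Standard BERT-style pair layout for reference: [CLS] A ... [SEP] is assigned
# token type 0 and B ... [SEP] token type 1.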
def UpperCAmelCase ( self , __a , __a = None) -> Tuple[str]:
'''simple docstring'''
_UpperCamelCase = self._tokenizer.model.save(__a , name=__a)
return tuple(__a)
| 19 |
"""simple docstring"""
import heapq
def lowerCamelCase__ ( __snake_case ) -> set[int]:
"""simple docstring"""
_UpperCamelCase = []
# for each node, push its rank and its (key, adjacency list) pair onto the queue
# using the heapq module, the queue behaves like a priority queue
# heapq implements a min-heap, so -1 * len(v) is used to get max-rank-first ordering
for key, value in graph.items():
# O(log(n))
heapq.heappush(__snake_case, [-1 * len(__snake_case ), (key, value)] )
# chosen_vertices = set of chosen vertices
_UpperCamelCase = set()
# while queue isn't empty and there are still edges
# (queue[0][0] is the rank of the node with max rank)
while queue and queue[0][0] != 0:
# extract vertex with max rank from queue and add it to chosen_vertices
_UpperCamelCase = heapq.heappop(__snake_case )[1][0]
chosen_vertices.add(__snake_case )
# Remove all arcs adjacent to argmax
for elem in queue:
# if v haven't adjacent node, skip
if elem[0] == 0:
continue
# if argmax is reachable from elem
# remove argmax from elem's adjacent list and update his rank
if argmax in elem[1][1]:
_UpperCamelCase = elem[1][1].index(__snake_case )
del elem[1][1][index]
elem[0] += 1
# re-order the queue
heapq.heapify(__snake_case )
return chosen_vertices
if __name__ == "__main__":
import doctest
doctest.testmod()
_a = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
print(F"""Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}""")
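# A quick sanity check, not in the original module: a set is a vertex cover exactly when
# every edge of the adjacency-list graph has at least one endpoint inside it.
def is_vertex_cover(graph: dict, cover: set) -> bool:
    return all(u in cover or v in cover for u, neighbors in graph.items() for v in neighbors)
# Example: is_vertex_cover({0: [1], 1: [0]}, greedy_min_vertex_cover({0: [1], 1: [0]})) -> True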
| 19 | 1 |
"""simple docstring"""
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class _UpperCAmelCase:
def __init__( self , __a , __a=13 , __a=32 , __a=2 , __a=3 , __a=16 , __a=[32, 64, 1_28] , __a=[1, 2, 1] , __a=[2, 2, 4] , __a=2 , __a=2.0 , __a=True , __a=0.0 , __a=0.0 , __a=0.1 , __a="gelu" , __a=False , __a=True , __a=0.02 , __a=1e-5 , __a=True , __a=None , __a=True , __a=10 , __a=8 , __a=["stage1", "stage2"] , __a=[1, 2] , ) -> Optional[int]:
'''simple docstring'''
_UpperCamelCase = parent
_UpperCamelCase = batch_size
_UpperCamelCase = image_size
_UpperCamelCase = patch_size
_UpperCamelCase = num_channels
_UpperCamelCase = embed_dim
_UpperCamelCase = hidden_sizes
_UpperCamelCase = depths
_UpperCamelCase = num_heads
_UpperCamelCase = window_size
_UpperCamelCase = mlp_ratio
_UpperCamelCase = qkv_bias
_UpperCamelCase = hidden_dropout_prob
_UpperCamelCase = attention_probs_dropout_prob
_UpperCamelCase = drop_path_rate
_UpperCamelCase = hidden_act
_UpperCamelCase = use_absolute_embeddings
_UpperCamelCase = patch_norm
_UpperCamelCase = layer_norm_eps
_UpperCamelCase = initializer_range
_UpperCamelCase = is_training
_UpperCamelCase = scope
_UpperCamelCase = use_labels
_UpperCamelCase = type_sequence_label_size
_UpperCamelCase = encoder_stride
_UpperCamelCase = out_features
_UpperCamelCase = out_indices
def UpperCAmelCase ( self) -> int:
'''simple docstring'''
_UpperCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
_UpperCamelCase = None
if self.use_labels:
_UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size)
_UpperCamelCase = self.get_config()
return config, pixel_values, labels
def UpperCAmelCase ( self) -> List[Any]:
'''simple docstring'''
return FocalNetConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , hidden_sizes=self.hidden_sizes , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def UpperCAmelCase ( self , __a , __a , __a) -> List[Any]:
'''simple docstring'''
_UpperCamelCase = FocalNetModel(config=__a)
model.to(__a)
model.eval()
_UpperCamelCase = model(__a)
_UpperCamelCase = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
_UpperCamelCase = int(config.embed_dim * 2 ** (len(config.depths) - 1))
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim))
def UpperCAmelCase ( self , __a , __a , __a) -> List[str]:
'''simple docstring'''
_UpperCamelCase = FocalNetBackbone(config=__a)
model.to(__a)
model.eval()
_UpperCamelCase = model(__a)
# verify feature maps
self.parent.assertEqual(len(result.feature_maps) , len(config.out_features))
self.parent.assertListEqual(list(result.feature_maps[0].shape) , [self.batch_size, self.image_size, 8, 8])
# verify channels
self.parent.assertEqual(len(model.channels) , len(config.out_features))
self.parent.assertListEqual(model.channels , config.hidden_sizes[:-1])
# verify backbone works with out_features=None
_UpperCamelCase = None
_UpperCamelCase = FocalNetBackbone(config=__a)
model.to(__a)
model.eval()
_UpperCamelCase = model(__a)
# verify feature maps
self.parent.assertEqual(len(result.feature_maps) , 1)
self.parent.assertListEqual(list(result.feature_maps[0].shape) , [self.batch_size, self.image_size * 2, 4, 4])
# verify channels
self.parent.assertEqual(len(model.channels) , 1)
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]])
def UpperCAmelCase ( self , __a , __a , __a) -> Any:
'''simple docstring'''
_UpperCamelCase = FocalNetForMaskedImageModeling(config=__a)
model.to(__a)
model.eval()
_UpperCamelCase = model(__a)
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size))
# test greyscale images
_UpperCamelCase = 1
_UpperCamelCase = FocalNetForMaskedImageModeling(__a)
model.to(__a)
model.eval()
_UpperCamelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
_UpperCamelCase = model(__a)
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size))
def UpperCAmelCase ( self , __a , __a , __a) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase = self.type_sequence_label_size
_UpperCamelCase = FocalNetForImageClassification(__a)
model.to(__a)
model.eval()
_UpperCamelCase = model(__a , labels=__a)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
# test greyscale images
_UpperCamelCase = 1
_UpperCamelCase = FocalNetForImageClassification(__a)
model.to(__a)
model.eval()
_UpperCamelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
_UpperCamelCase = model(__a)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
def UpperCAmelCase ( self) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase = self.prepare_config_and_inputs()
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = config_and_inputs
_UpperCamelCase = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class _UpperCAmelCase( lowerCamelCase , lowerCamelCase , unittest.TestCase ):
lowercase__ = (
(
FocalNetModel,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetBackbone,
)
if is_torch_available()
else ()
)
lowercase__ = (
{'feature-extraction': FocalNetModel, 'image-classification': FocalNetForImageClassification}
if is_torch_available()
else {}
)
lowercase__ = False
lowercase__ = False
lowercase__ = False
lowercase__ = False
lowercase__ = False
def UpperCAmelCase ( self) -> str:
'''simple docstring'''
_UpperCamelCase = FocalNetModelTester(self)
_UpperCamelCase = ConfigTester(self , config_class=__a , embed_dim=37 , has_text_modality=__a)
def UpperCAmelCase ( self) -> Optional[Any]:
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCAmelCase ( self) -> Dict:
'''simple docstring'''
return
def UpperCAmelCase ( self) -> List[Any]:
'''simple docstring'''
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a)
def UpperCAmelCase ( self) -> List[Any]:
'''simple docstring'''
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*__a)
def UpperCAmelCase ( self) -> Union[str, Any]:
'''simple docstring'''
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*__a)
def UpperCAmelCase ( self) -> Any:
'''simple docstring'''
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__a)
@unittest.skip(reason='''FocalNet does not use inputs_embeds''')
def UpperCAmelCase ( self) -> Optional[int]:
'''simple docstring'''
pass
@unittest.skip(reason='''FocalNet does not use feedforward chunking''')
def UpperCAmelCase ( self) -> str:
'''simple docstring'''
pass
def UpperCAmelCase ( self) -> Any:
'''simple docstring'''
_UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
_UpperCamelCase = model_class(__a)
self.assertIsInstance(model.get_input_embeddings() , (nn.Module))
            x = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__a , nn.Linear))
def UpperCAmelCase ( self) -> Optional[int]:
'''simple docstring'''
_UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
_UpperCamelCase = model_class(__a)
_UpperCamelCase = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCamelCase = [*signature.parameters.keys()]
_UpperCamelCase = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , __a)
def UpperCAmelCase ( self , __a , __a , __a , __a) -> int:
'''simple docstring'''
_UpperCamelCase = model_class(__a)
model.to(__a)
model.eval()
with torch.no_grad():
_UpperCamelCase = model(**self._prepare_for_class(__a , __a))
_UpperCamelCase = outputs.hidden_states
_UpperCamelCase = getattr(
self.model_tester , '''expected_num_hidden_layers''' , len(self.model_tester.depths) + 1)
self.assertEqual(len(__a) , __a)
# FocalNet has a different seq_length
_UpperCamelCase = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable)
else (config.patch_size, config.patch_size)
)
_UpperCamelCase = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:]) , [num_patches, self.model_tester.embed_dim] , )
_UpperCamelCase = outputs.reshaped_hidden_states
self.assertEqual(len(__a) , __a)
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = reshaped_hidden_states[0].shape
_UpperCamelCase = (
reshaped_hidden_states[0].view(__a , __a , height * width).permute(0 , 2 , 1)
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:]) , [num_patches, self.model_tester.embed_dim] , )
def UpperCAmelCase ( self) -> List[str]:
'''simple docstring'''
_UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCamelCase = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable)
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes[:-1]:
_UpperCamelCase = True
self.check_hidden_states_output(__a , __a , __a , __a)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_UpperCamelCase = True
self.check_hidden_states_output(__a , __a , __a , __a)
def UpperCAmelCase ( self) -> int:
'''simple docstring'''
_UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCamelCase = 3
_UpperCamelCase = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable)
else (self.model_tester.image_size, self.model_tester.image_size)
)
_UpperCamelCase = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable)
else (config.patch_size, config.patch_size)
)
_UpperCamelCase = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
_UpperCamelCase = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes[:-1]:
_UpperCamelCase = True
self.check_hidden_states_output(__a , __a , __a , (padded_height, padded_width))
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_UpperCamelCase = True
self.check_hidden_states_output(__a , __a , __a , (padded_height, padded_width))
@slow
def UpperCAmelCase ( self) -> Dict:
'''simple docstring'''
for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCamelCase = FocalNetModel.from_pretrained(__a)
self.assertIsNotNone(__a)
def UpperCAmelCase ( self) -> Union[str, Any]:
'''simple docstring'''
_UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCamelCase = _config_zero_init(__a)
for model_class in self.all_model_classes:
_UpperCamelCase = model_class(config=__a)
for name, param in model.named_parameters():
if "embeddings" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@require_vision
@require_torch
class _UpperCAmelCase( unittest.TestCase ):
@cached_property
def UpperCAmelCase ( self) -> Tuple:
'''simple docstring'''
# TODO update organization
return AutoImageProcessor.from_pretrained('''microsoft/focalnet-tiny''') if is_vision_available() else None
@slow
def UpperCAmelCase ( self) -> str:
'''simple docstring'''
_UpperCamelCase = FocalNetForImageClassification.from_pretrained('''microsoft/focalnet-tiny''').to(__a)
_UpperCamelCase = self.default_image_processor
_UpperCamelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''')
_UpperCamelCase = image_processor(images=__a , return_tensors='''pt''').to(__a)
# forward pass
with torch.no_grad():
_UpperCamelCase = model(**__a)
# verify the logits
_UpperCamelCase = torch.Size((1, 10_00))
self.assertEqual(outputs.logits.shape , __a)
_UpperCamelCase = torch.tensor([0.2166, -0.4368, 0.2191]).to(__a)
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __a , atol=1e-4))
        self.assertEqual(outputs.logits.argmax(dim=-1).item() , 281)
@require_torch
class _UpperCAmelCase( lowerCamelCase , unittest.TestCase ):
lowercase__ = (FocalNetBackbone,) if is_torch_available() else ()
lowercase__ = FocalNetConfig
lowercase__ = False
def UpperCAmelCase ( self) -> Tuple:
'''simple docstring'''
_UpperCamelCase = FocalNetModelTester(self)
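# Worked example (illustrative, using the tester defaults above: image_size=32, patch_size=2,
# embed_dim=16, depths=[1, 2, 1]) of the shape math asserted in the model check:
#     expected_seq_len = ((32 // 2) ** 2) // (4 ** (3 - 1))  # 256 // 16 = 16
#     expected_dim = 16 * 2 ** (3 - 1)                       # 64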
| 19 |
"""simple docstring"""
from datasets.utils.patching import _PatchedModuleObj, patch_submodule
from . import _test_patching
def lowerCamelCase__ ( ) -> List[str]:
"""simple docstring"""
import os as original_os
from os import path as original_path
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
    mock = '''__test_patch_submodule_mock__'''
    with patch_submodule(_test_patching, '''os.path.join''', mock ):
# Every way to access os.path.join must be patched, and the rest must stay untouched
# check os.path.join
assert isinstance(_test_patching.os, _PatchedModuleObj )
assert isinstance(_test_patching.os.path, _PatchedModuleObj )
assert _test_patching.os.path.join is mock
# check path.join
assert isinstance(_test_patching.path, _PatchedModuleObj )
assert _test_patching.path.join is mock
# check join
assert _test_patching.join is mock
# check that the other attributes are untouched
assert _test_patching.os.rename is original_rename
assert _test_patching.path.dirname is original_dirname
assert _test_patching.os.path.dirname is original_dirname
# Even renamed modules or objects must be patched
# check renamed_os.path.join
assert isinstance(_test_patching.renamed_os, _PatchedModuleObj )
assert isinstance(_test_patching.renamed_os.path, _PatchedModuleObj )
assert _test_patching.renamed_os.path.join is mock
# check renamed_path.join
assert isinstance(_test_patching.renamed_path, _PatchedModuleObj )
assert _test_patching.renamed_path.join is mock
# check renamed_join
assert _test_patching.renamed_join is mock
# check that the other attributes are untouched
assert _test_patching.renamed_os.rename is original_rename
assert _test_patching.renamed_path.dirname is original_dirname
assert _test_patching.renamed_os.path.dirname is original_dirname
# check that everthing is back to normal when the patch is over
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
def lowerCamelCase__ ( ) -> List[str]:
"""simple docstring"""
assert _test_patching.open is open
    mock = '''__test_patch_submodule_builtin_mock__'''
    # _test_patching has "open" in its globals
    assert _test_patching.open is open
    with patch_submodule(_test_patching, '''open''', mock ):
assert _test_patching.open is mock
# check that everthing is back to normal when the patch is over
assert _test_patching.open is open
def lowerCamelCase__ ( ) -> Union[str, Any]:
"""simple docstring"""
    mock = '''__test_patch_submodule_missing_mock__'''
    with patch_submodule(_test_patching, '''pandas.read_csv''', mock ):
pass
def lowerCamelCase__ ( ) -> Dict:
"""simple docstring"""
    mock = '''__test_patch_submodule_missing_builtin_mock__'''
    # _test_patching doesn't have "len" in its globals
    assert getattr(_test_patching, '''len''', None ) is None
    with patch_submodule(_test_patching, '''len''', mock ):
assert _test_patching.len is mock
assert _test_patching.len is len
def lowerCamelCase__ ( ) -> Tuple:
"""simple docstring"""
    mock = '''__test_patch_submodule_start_and_stop_mock__'''
    patch = patch_submodule(_test_patching, '''open''', mock )
assert _test_patching.open is open
patch.start()
assert _test_patching.open is mock
patch.stop()
assert _test_patching.open is open
def lowerCamelCase__ ( ) -> Optional[int]:
"""simple docstring"""
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
    mock_join = '''__test_patch_submodule_successive_join__'''
    mock_dirname = '''__test_patch_submodule_successive_dirname__'''
    mock_rename = '''__test_patch_submodule_successive_rename__'''
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
    with patch_submodule(_test_patching, '''os.path.join''', mock_join ):
        with patch_submodule(_test_patching, '''os.rename''', mock_rename ):
            with patch_submodule(_test_patching, '''os.path.dirname''', mock_dirname ):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
# try another order
    with patch_submodule(_test_patching, '''os.rename''', mock_rename ):
        with patch_submodule(_test_patching, '''os.path.join''', mock_join ):
            with patch_submodule(_test_patching, '''os.path.dirname''', mock_dirname ):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
def lowerCamelCase__ ( ) -> str:
"""simple docstring"""
    mock = '''__test_patch_submodule_doesnt_exist_mock__'''
    with patch_submodule(_test_patching, '''__module_that_doesn_exist__.__attribute_that_doesn_exist__''', mock ):
        pass
    with patch_submodule(_test_patching, '''os.__attribute_that_doesn_exist__''', mock ):
pass
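# The tests above exercise `datasets.utils.patching.patch_submodule`. The real implementation
# additionally wraps intermediate modules in `_PatchedModuleObj`; the core start/stop contract
# can be sketched as a plain attribute patcher (a simplified, hypothetical stand-in, not the
# library code):
class SimpleAttributePatch:
    """Swap one attribute on an object, restoring the prior state on stop()/exit."""
    _missing = object()

    def __init__(self, obj, name, new_value):
        self.obj, self.name, self.new_value = obj, name, new_value

    def start(self):
        self.original = getattr(self.obj, self.name, self._missing)
        setattr(self.obj, self.name, self.new_value)

    def stop(self):
        if self.original is self._missing:
            delattr(self.obj, self.name)
        else:
            setattr(self.obj, self.name, self.original)

    def __enter__(self):
        self.start()
        return self

    def __exit__(self, *exc):
        self.stop()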
| 19 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"""configuration_roformer""": ["""ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """RoFormerConfig""", """RoFormerOnnxConfig"""],
"""tokenization_roformer""": ["""RoFormerTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""tokenization_roformer_fast"""] = ["""RoFormerTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_roformer"""] = [
"""ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""RoFormerForCausalLM""",
"""RoFormerForMaskedLM""",
"""RoFormerForMultipleChoice""",
"""RoFormerForQuestionAnswering""",
"""RoFormerForSequenceClassification""",
"""RoFormerForTokenClassification""",
"""RoFormerLayer""",
"""RoFormerModel""",
"""RoFormerPreTrainedModel""",
"""load_tf_weights_in_roformer""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_tf_roformer"""] = [
"""TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFRoFormerForCausalLM""",
"""TFRoFormerForMaskedLM""",
"""TFRoFormerForMultipleChoice""",
"""TFRoFormerForQuestionAnswering""",
"""TFRoFormerForSequenceClassification""",
"""TFRoFormerForTokenClassification""",
"""TFRoFormerLayer""",
"""TFRoFormerModel""",
"""TFRoFormerPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_flax_roformer"""] = [
"""FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FlaxRoFormerForMaskedLM""",
"""FlaxRoFormerForMultipleChoice""",
"""FlaxRoFormerForQuestionAnswering""",
"""FlaxRoFormerForSequenceClassification""",
"""FlaxRoFormerForTokenClassification""",
"""FlaxRoFormerModel""",
"""FlaxRoFormerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
from .tokenization_roformer import RoFormerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roformer_fast import RoFormerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roformer import (
ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
RoFormerForCausalLM,
RoFormerForMaskedLM,
RoFormerForMultipleChoice,
RoFormerForQuestionAnswering,
RoFormerForSequenceClassification,
RoFormerForTokenClassification,
RoFormerLayer,
RoFormerModel,
RoFormerPreTrainedModel,
load_tf_weights_in_roformer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roformer import (
TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerLayer,
TFRoFormerModel,
TFRoFormerPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roformer import (
FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
FlaxRoFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
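# For reference, the lazy-import pattern used above boils down to replacing the module in
# sys.modules with an object whose __getattr__ triggers the real import on first access.
# A stripped-down, hypothetical illustration (not the transformers `_LazyModule` code):
#
#     import importlib, types
#
#     class TinyLazyModule(types.ModuleType):
#         def __init__(self, name, import_structure):
#             super().__init__(name)
#             # attribute name -> submodule that defines it
#             self._attr_to_module = {
#                 attr: mod for mod, attrs in import_structure.items() for attr in attrs
#             }
#
#         def __getattr__(self, attr):
#             if attr not in self._attr_to_module:
#                 raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
#             submodule = importlib.import_module(f".{self._attr_to_module[attr]}", self.__name__)
#             value = getattr(submodule, attr)
#             setattr(self, attr, value)  # cache, so __getattr__ is not hit again
#             return value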
| 19 |
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
_a = logging.get_logger(__name__)
def replace_key_with_offset(key, offset, original_name, new_name) -> str:
    """simple docstring"""
    to_find = original_name.split('''.''' )[0]
    key_list = key.split('''.''' )
    orig_block_num = int(key_list[key_list.index(to_find ) - 2] )
    layer_num = int(key_list[key_list.index(to_find ) - 1] )
    new_block_num = orig_block_num - offset
    key = key.replace(F'''{orig_block_num}.{layer_num}.{original_name}''', F'''block.{new_block_num}.{layer_num}.{new_name}''' )
    return key
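# Hypothetical demonstration of the renaming rule above: block 1, layer 3, "mlp.fc1" with
# offset 0 maps onto the HuggingFace "output.conv1" naming.
#     replace_key_with_offset("network.1.3.mlp.fc1.weight", 0, "mlp.fc1", "output.conv1")
#     -> 'network.block.1.3.output.conv1.weight'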
def rename_keys(state_dict) -> OrderedDict:
    """simple docstring"""
    new_state_dict = OrderedDict()
    total_embed_found, patch_emb_offset = 0, 0
    for key, value in state_dict.items():
        if key.startswith('''network''' ):
            key = key.replace('''network''', '''poolformer.encoder''' )
        if "proj" in key:
            # Works for the first embedding as well as the internal embedding layers
            if key.endswith('''bias''' ) and "patch_embed" not in key:
                patch_emb_offset += 1
            to_replace = key[: key.find('''proj''' )]
            key = key.replace(to_replace, F'''patch_embeddings.{total_embed_found}.''' )
            key = key.replace('''proj''', '''projection''' )
            if key.endswith('''bias''' ):
                total_embed_found += 1
        if "patch_embeddings" in key:
            key = '''poolformer.encoder.''' + key
        if "mlp.fc1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, '''mlp.fc1''', '''output.conv1''' )
        if "mlp.fc2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, '''mlp.fc2''', '''output.conv2''' )
        if "norm1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, '''norm1''', '''before_norm''' )
        if "norm2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, '''norm2''', '''after_norm''' )
        if "layer_scale_1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, '''layer_scale_1''', '''layer_scale_1''' )
        if "layer_scale_2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, '''layer_scale_2''', '''layer_scale_2''' )
        if "head" in key:
            key = key.replace('''head''', '''classifier''' )
        new_state_dict[key] = value
    return new_state_dict
def prepare_img() -> Image.Image:
    """simple docstring"""
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    image = Image.open(requests.get(url, stream=True ).raw )
    return image
@torch.no_grad()
def convert_poolformer_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path) -> None:
    """simple docstring"""
    config = PoolFormerConfig()
    # set attributes based on model_name
    repo_id = '''huggingface/label-files'''
    size = model_name[-3:]
    config.num_labels = 1000
    filename = '''imagenet-1k-id2label.json'''
    expected_shape = (1, 1000)
    # set config attributes
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='''dataset''' ), '''r''' ) )
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    if size == "s12":
        config.depths = [2, 2, 6, 2]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s24":
        config.depths = [4, 4, 12, 4]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s36":
        config.depths = [6, 6, 18, 6]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.9
    elif size == "m36":
        config.depths = [6, 6, 18, 6]
        config.hidden_sizes = [96, 192, 384, 768]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.95
    elif size == "m48":
        config.depths = [8, 8, 24, 8]
        config.hidden_sizes = [96, 192, 384, 768]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.95
    else:
        raise ValueError(F'''Size {size} not supported''' )
    # load image processor
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct )
    # Prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors='''pt''' ).pixel_values
    logger.info(F'''Converting model {model_name}...''' )
    # load original state dict
    state_dict = torch.load(checkpoint_path, map_location=torch.device('''cpu''' ) )
    # rename keys
    state_dict = rename_keys(state_dict )
    # create HuggingFace model and load state dict
    model = PoolFormerForImageClassification(config )
    model.load_state_dict(state_dict )
    model.eval()
    # Define image processor
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct )
    pixel_values = image_processor(images=prepare_img(), return_tensors='''pt''' ).pixel_values
    # forward pass
    outputs = model(pixel_values )
    logits = outputs.logits
    # define expected logit slices for different models
    if size == "s12":
        expected_slice = torch.tensor([-0.3045, -0.6758, -0.4869] )
    elif size == "s24":
        expected_slice = torch.tensor([0.4402, -0.1374, -0.8045] )
    elif size == "s36":
        expected_slice = torch.tensor([-0.6080, -0.5133, -0.5898] )
    elif size == "m36":
        expected_slice = torch.tensor([0.3952, 0.2263, -1.2668] )
    elif size == "m48":
        expected_slice = torch.tensor([0.1167, -0.0656, -0.3423] )
    else:
        raise ValueError(F'''Size {size} not supported''' )
    # verify logits
    assert logits.shape == expected_shape
    assert torch.allclose(logits[0, :3], expected_slice, atol=1e-2 )
    # finally, save model and image processor
    logger.info(F'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    model.save_pretrained(pytorch_dump_folder_path )
    print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
    image_processor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
_a = argparse.ArgumentParser()
parser.add_argument(
"""--model_name""",
default="""poolformer_s12""",
type=str,
help="""Name of the model you'd like to convert.""",
)
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, help="""Path to the original PyTorch checkpoint (.pth file)."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
_a = parser.parse_args()
convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
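# Example invocation (script filename and paths are placeholders; the flags are the ones
# defined by the argument parser above):
#   python convert_poolformer_checkpoint.py \
#       --model_name poolformer_s12 \
#       --checkpoint_path /path/to/poolformer_s12.pth.tar \
#       --pytorch_dump_folder_path /path/to/output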
| 19 | 1 |
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.activations import gelu_new, gelu_python, get_activation
@require_torch
class _UpperCAmelCase( unittest.TestCase ):
def UpperCAmelCase ( self) -> str:
'''simple docstring'''
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
        torch_builtin = get_activation('''gelu''')
        self.assertTrue(torch.allclose(gelu_python(x) , torch_builtin(x)))
        self.assertFalse(torch.allclose(gelu_python(x) , gelu_new(x)))
def UpperCAmelCase ( self) -> Optional[Any]:
'''simple docstring'''
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
        torch_builtin = get_activation('''gelu''')
        geluaa = get_activation('''gelu_10''')
        y_gelu = torch_builtin(x)
        y_gelu_aa = geluaa(x)
        clipped_mask = torch.where(y_gelu_aa < 10.0 , 1 , 0)
        self.assertTrue(torch.max(y_gelu_aa).item() == 10.0)
        self.assertTrue(torch.allclose(y_gelu * clipped_mask , y_gelu_aa * clipped_mask))
def UpperCAmelCase ( self) -> List[Any]:
'''simple docstring'''
get_activation('''gelu''')
get_activation('''gelu_10''')
get_activation('''gelu_fast''')
get_activation('''gelu_new''')
get_activation('''gelu_python''')
get_activation('''gelu_pytorch_tanh''')
get_activation('''linear''')
get_activation('''mish''')
get_activation('''quick_gelu''')
get_activation('''relu''')
get_activation('''sigmoid''')
get_activation('''silu''')
get_activation('''swish''')
get_activation('''tanh''')
        with self.assertRaises(KeyError):
            get_activation('''bogus''')
        with self.assertRaises(KeyError):
            get_activation(None)
def UpperCAmelCase ( self) -> Optional[int]:
'''simple docstring'''
        acta = get_activation('''gelu''')
        acta.a = 1
        actb = get_activation('''gelu''')
        self.assertEqual(acta.a , 1)
        with self.assertRaises(AttributeError):
            _ = actb.a
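# For reference, the two GELU variants contrasted in the first test above, written out as a
# sketch (standard formulas, not the transformers source): `gelu_python` is the exact erf
# form, `gelu_new` the GPT-2 style tanh approximation.
import math

def gelu_exact(x):
    # GELU(x) = 0.5 * x * (1 + erf(x / sqrt(2)))
    return 0.5 * x * (1.0 + torch.erf(x / math.sqrt(2.0)))

def gelu_tanh_approx(x):
    # 0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x^3)))
    return 0.5 * x * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (x + 0.044715 * torch.pow(x, 3.0))))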
| 19 |
"""simple docstring"""
import torch
from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde
from .test_schedulers import SchedulerCommonTest
@require_torchsde
class _UpperCAmelCase( lowerCamelCase ):
lowercase__ = (DPMSolverSDEScheduler,)
lowercase__ = 10
def UpperCAmelCase ( self , **__a) -> int:
'''simple docstring'''
_UpperCamelCase = {
'''num_train_timesteps''': 11_00,
'''beta_start''': 0.0001,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
'''noise_sampler_seed''': 0,
}
config.update(**__a)
return config
def UpperCAmelCase ( self) -> Union[str, Any]:
'''simple docstring'''
for timesteps in [10, 50, 1_00, 10_00]:
self.check_over_configs(num_train_timesteps=__a)
def UpperCAmelCase ( self) -> Dict:
'''simple docstring'''
for beta_start, beta_end in zip([0.0_0001, 0.0001, 0.001] , [0.0002, 0.002, 0.02]):
self.check_over_configs(beta_start=__a , beta_end=__a)
def UpperCAmelCase ( self) -> Optional[int]:
'''simple docstring'''
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=__a)
def UpperCAmelCase ( self) -> str:
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=__a)
def UpperCAmelCase ( self) -> Union[str, Any]:
'''simple docstring'''
_UpperCamelCase = self.scheduler_classes[0]
_UpperCamelCase = self.get_scheduler_config()
_UpperCamelCase = scheduler_class(**__a)
scheduler.set_timesteps(self.num_inference_steps)
_UpperCamelCase = self.dummy_model()
_UpperCamelCase = self.dummy_sample_deter * scheduler.init_noise_sigma
_UpperCamelCase = sample.to(__a)
for i, t in enumerate(scheduler.timesteps):
_UpperCamelCase = scheduler.scale_model_input(__a , __a)
_UpperCamelCase = model(__a , __a)
_UpperCamelCase = scheduler.step(__a , __a , __a)
_UpperCamelCase = output.prev_sample
_UpperCamelCase = torch.sum(torch.abs(__a))
_UpperCamelCase = torch.mean(torch.abs(__a))
if torch_device in ["mps"]:
assert abs(result_sum.item() - 167.47_8210_4492_1875) < 1e-2
assert abs(result_mean.item() - 0.2178_7059_6456_5277) < 1e-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 171.59_3521_1181_6406) < 1e-2
assert abs(result_mean.item() - 0.2_2342_9068_9229_9652) < 1e-3
else:
assert abs(result_sum.item() - 162.52_3834_2285_1562) < 1e-2
assert abs(result_mean.item() - 0.211_6195_7085_1326) < 1e-3
def UpperCAmelCase ( self) -> List[str]:
'''simple docstring'''
_UpperCamelCase = self.scheduler_classes[0]
_UpperCamelCase = self.get_scheduler_config(prediction_type='''v_prediction''')
_UpperCamelCase = scheduler_class(**__a)
scheduler.set_timesteps(self.num_inference_steps)
_UpperCamelCase = self.dummy_model()
_UpperCamelCase = self.dummy_sample_deter * scheduler.init_noise_sigma
_UpperCamelCase = sample.to(__a)
for i, t in enumerate(scheduler.timesteps):
_UpperCamelCase = scheduler.scale_model_input(__a , __a)
_UpperCamelCase = model(__a , __a)
_UpperCamelCase = scheduler.step(__a , __a , __a)
_UpperCamelCase = output.prev_sample
_UpperCamelCase = torch.sum(torch.abs(__a))
_UpperCamelCase = torch.mean(torch.abs(__a))
if torch_device in ["mps"]:
assert abs(result_sum.item() - 124.77_1492_0043_9453) < 1e-2
assert abs(result_mean.item() - 0.1_6226_2890_1481_6284) < 1e-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 128.1_6633_6059_5703) < 1e-2
assert abs(result_mean.item() - 0.1_6688_3260_0116_7297) < 1e-3
else:
assert abs(result_sum.item() - 119.8_4875_4882_8125) < 1e-2
assert abs(result_mean.item() - 0.1560_5306_6253_6621) < 1e-3
def UpperCAmelCase ( self) -> Any:
'''simple docstring'''
_UpperCamelCase = self.scheduler_classes[0]
_UpperCamelCase = self.get_scheduler_config()
_UpperCamelCase = scheduler_class(**__a)
scheduler.set_timesteps(self.num_inference_steps , device=__a)
_UpperCamelCase = self.dummy_model()
_UpperCamelCase = self.dummy_sample_deter.to(__a) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
_UpperCamelCase = scheduler.scale_model_input(__a , __a)
_UpperCamelCase = model(__a , __a)
_UpperCamelCase = scheduler.step(__a , __a , __a)
_UpperCamelCase = output.prev_sample
_UpperCamelCase = torch.sum(torch.abs(__a))
_UpperCamelCase = torch.mean(torch.abs(__a))
if torch_device in ["mps"]:
assert abs(result_sum.item() - 167.46_9573_9746_0938) < 1e-2
assert abs(result_mean.item() - 0.2_1805_9346_0798_2635) < 1e-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 171.59_3536_3769_5312) < 1e-2
assert abs(result_mean.item() - 0.2_2342_9083_8241_5771) < 1e-3
else:
assert abs(result_sum.item() - 162.52_3834_2285_1562) < 1e-2
assert abs(result_mean.item() - 0.211_6195_7085_1326) < 1e-3
def UpperCAmelCase ( self) -> Any:
'''simple docstring'''
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config , use_karras_sigmas=True)
scheduler.set_timesteps(self.num_inference_steps , device=__a)
_UpperCamelCase = self.dummy_model()
_UpperCamelCase = self.dummy_sample_deter.to(__a) * scheduler.init_noise_sigma
_UpperCamelCase = sample.to(__a)
for t in scheduler.timesteps:
_UpperCamelCase = scheduler.scale_model_input(__a , __a)
_UpperCamelCase = model(__a , __a)
_UpperCamelCase = scheduler.step(__a , __a , __a)
_UpperCamelCase = output.prev_sample
_UpperCamelCase = torch.sum(torch.abs(__a))
_UpperCamelCase = torch.mean(torch.abs(__a))
if torch_device in ["mps"]:
assert abs(result_sum.item() - 176.66_9741_3574_2188) < 1e-2
assert abs(result_mean.item() - 0.2_3003_8727_3098_1811) < 1e-2
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 177.63_6535_6445_3125) < 1e-2
assert abs(result_mean.item() - 0.2_3003_8727_3098_1811) < 1e-2
else:
assert abs(result_sum.item() - 170.3_1352_2338_8672) < 1e-2
assert abs(result_mean.item() - 0.2_3003_8727_3098_1811) < 1e-2
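# The pattern each test above repeats, distilled (a sketch; `model` stands in for any
# noise-prediction network and the tensors are illustrative):
#
#     scheduler.set_timesteps(num_inference_steps)
#     sample = initial_noise * scheduler.init_noise_sigma
#     for t in scheduler.timesteps:
#         model_input = scheduler.scale_model_input(sample, t)
#         noise_pred = model(model_input, t)
#         sample = scheduler.step(noise_pred, t, sample).prev_sample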
| 19 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
_a = logging.get_logger(__name__)
_a = {
"""shi-labs/nat-mini-in1k-224""": """https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json""",
# See all Nat models at https://huggingface.co/models?filter=nat
}
class _UpperCAmelCase( lowerCamelCase , lowerCamelCase ):
lowercase__ = 'nat'
lowercase__ = {
'num_attention_heads': 'num_heads',
'num_hidden_layers': 'num_layers',
}
def __init__( self , __a=4 , __a=3 , __a=64 , __a=[3, 4, 6, 5] , __a=[2, 4, 8, 16] , __a=7 , __a=3.0 , __a=True , __a=0.0 , __a=0.0 , __a=0.1 , __a="gelu" , __a=0.02 , __a=1e-5 , __a=0.0 , __a=None , __a=None , **__a , ) -> Tuple:
'''simple docstring'''
super().__init__(**__a)
_UpperCamelCase = patch_size
_UpperCamelCase = num_channels
_UpperCamelCase = embed_dim
_UpperCamelCase = depths
_UpperCamelCase = len(__a)
_UpperCamelCase = num_heads
_UpperCamelCase = kernel_size
_UpperCamelCase = mlp_ratio
_UpperCamelCase = qkv_bias
_UpperCamelCase = hidden_dropout_prob
_UpperCamelCase = attention_probs_dropout_prob
_UpperCamelCase = drop_path_rate
_UpperCamelCase = hidden_act
_UpperCamelCase = layer_norm_eps
_UpperCamelCase = initializer_range
# we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
_UpperCamelCase = int(embed_dim * 2 ** (len(__a) - 1))
_UpperCamelCase = layer_scale_init_value
_UpperCamelCase = ['''stem'''] + [F'''stage{idx}''' for idx in range(1 , len(__a) + 1)]
_UpperCamelCase , _UpperCamelCase = get_aligned_output_features_output_indices(
out_features=__a , out_indices=__a , stage_names=self.stage_names)
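# Worked example of the hidden_size rule computed above (illustrative): with the defaults
# embed_dim=64 and depths=[3, 4, 6, 5] (four stages),
#     hidden_size = int(64 * 2 ** (4 - 1)) = 512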
| 19 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_batched,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
_a = logging.get_logger(__name__)
class _UpperCAmelCase( lowerCamelCase ):
lowercase__ = ['pixel_values']
def __init__( self , __a = True , __a = None , __a = PILImageResampling.BICUBIC , __a = True , __a = True , __a = 1 / 2_55 , __a = None , __a = True , __a = None , __a = None , **__a , ) -> None:
'''simple docstring'''
super().__init__(**__a)
_UpperCamelCase = size if size is not None else {'''height''': 2_24, '''width''': 2_24}
_UpperCamelCase = get_size_dict(__a)
_UpperCamelCase = crop_size if crop_size is not None else {'''height''': 2_24, '''width''': 2_24}
_UpperCamelCase = get_size_dict(__a , default_to_square=__a , param_name='''crop_size''')
_UpperCamelCase = do_resize
_UpperCamelCase = do_rescale
_UpperCamelCase = do_normalize
_UpperCamelCase = do_center_crop
_UpperCamelCase = crop_size
_UpperCamelCase = size
_UpperCamelCase = resample
_UpperCamelCase = rescale_factor
_UpperCamelCase = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
_UpperCamelCase = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def UpperCAmelCase ( self , __a , __a , __a = PILImageResampling.BILINEAR , __a = None , **__a , ) -> np.ndarray:
'''simple docstring'''
_UpperCamelCase = get_size_dict(__a)
if "shortest_edge" in size:
_UpperCamelCase = get_resize_output_image_size(__a , size=size['''shortest_edge'''] , default_to_square=__a)
# size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"])
elif "height" in size and "width" in size:
_UpperCamelCase = (size['''height'''], size['''width'''])
else:
raise ValueError(F'''Size must contain \'height\' and \'width\' keys or \'shortest_edge\' key. Got {size.keys()}''')
return resize(__a , size=__a , resample=__a , data_format=__a , **__a)
def UpperCAmelCase ( self , __a , __a , __a = None , **__a , ) -> np.ndarray:
'''simple docstring'''
_UpperCamelCase = get_size_dict(__a)
if "height" not in size or "width" not in size:
raise ValueError(F'''The `size` parameter must contain the keys (height, width). Got {size.keys()}''')
return center_crop(__a , size=(size['''height'''], size['''width''']) , data_format=__a , **__a)
def UpperCAmelCase ( self , __a , __a , __a = None , **__a) -> np.ndarray:
'''simple docstring'''
return rescale(__a , scale=__a , data_format=__a , **__a)
def UpperCAmelCase ( self , __a , __a , __a , __a = None , **__a , ) -> np.ndarray:
'''simple docstring'''
return normalize(__a , mean=__a , std=__a , data_format=__a , **__a)
def UpperCAmelCase ( self , __a , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , __a = ChannelDimension.FIRST , **__a , ) -> BatchFeature:
'''simple docstring'''
_UpperCamelCase = do_resize if do_resize is not None else self.do_resize
_UpperCamelCase = do_rescale if do_rescale is not None else self.do_rescale
_UpperCamelCase = do_normalize if do_normalize is not None else self.do_normalize
_UpperCamelCase = do_center_crop if do_center_crop is not None else self.do_center_crop
_UpperCamelCase = crop_size if crop_size is not None else self.crop_size
_UpperCamelCase = get_size_dict(__a , param_name='''crop_size''' , default_to_square=__a)
_UpperCamelCase = resample if resample is not None else self.resample
_UpperCamelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
_UpperCamelCase = image_mean if image_mean is not None else self.image_mean
_UpperCamelCase = image_std if image_std is not None else self.image_std
_UpperCamelCase = size if size is not None else self.size
_UpperCamelCase = get_size_dict(__a)
if not is_batched(__a):
_UpperCamelCase = [images]
if not valid_images(__a):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''')
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''')
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''')
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''')
# All transformations expect numpy arrays.
_UpperCamelCase = [to_numpy_array(__a) for image in images]
if do_resize:
_UpperCamelCase = [self.resize(image=__a , size=__a , resample=__a) for image in images]
if do_center_crop:
_UpperCamelCase = [self.center_crop(image=__a , size=__a) for image in images]
if do_rescale:
_UpperCamelCase = [self.rescale(image=__a , scale=__a) for image in images]
if do_normalize:
_UpperCamelCase = [self.normalize(image=__a , mean=__a , std=__a) for image in images]
_UpperCamelCase = [to_channel_dimension_format(__a , __a) for image in images]
_UpperCamelCase = {'''pixel_values''': images}
return BatchFeature(data=__a , tensor_type=__a)
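# Usage sketch (hypothetical; instantiate the image processor class above with its defaults,
# the output shape follows from the 224x224 crop_size):
#
#     import numpy as np
#     processor = <the image processor class above>()
#     dummy = np.random.randint(0, 256, size=(640, 480, 3), dtype=np.uint8)
#     batch = processor(images=dummy, return_tensors="np")
#     batch["pixel_values"].shape  # (1, 3, 224, 224): resize -> center-crop -> rescale -> normalize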
| 19 | 1 |
"""simple docstring"""
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline # noqa: F401
deprecate(
"""stable diffusion controlnet""",
"""0.22.0""",
"""Importing `FlaxStableDiffusionControlNetPipeline` from diffusers.pipelines.stable_diffusion.flax_pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import FlaxStableDiffusionControlNetPipeline` instead.""",
standard_warn=False,
stacklevel=3,
)
| 19 |
"""simple docstring"""
# Imports
import numpy as np
class _UpperCAmelCase:
def __init__( self , __a=None , __a=None , __a=None , __a=None , __a=None) -> Dict:
'''simple docstring'''
self.set_matricies(red=__a , green=__a , blue=__a , red_edge=__a , nir=__a)
def UpperCAmelCase ( self , __a=None , __a=None , __a=None , __a=None , __a=None) -> Dict:
'''simple docstring'''
if red is not None:
_UpperCamelCase = red
if green is not None:
_UpperCamelCase = green
if blue is not None:
_UpperCamelCase = blue
if red_edge is not None:
_UpperCamelCase = red_edge
if nir is not None:
_UpperCamelCase = nir
return True
def UpperCAmelCase ( self , __a="" , __a=None , __a=None , __a=None , __a=None , __a=None) -> List[str]:
'''simple docstring'''
self.set_matricies(red=__a , green=__a , blue=__a , red_edge=__a , nir=__a)
_UpperCamelCase = {
'''ARVI2''': self.arvaa,
'''CCCI''': self.ccci,
'''CVI''': self.cvi,
'''GLI''': self.gli,
'''NDVI''': self.ndvi,
'''BNDVI''': self.bndvi,
'''redEdgeNDVI''': self.red_edge_ndvi,
'''GNDVI''': self.gndvi,
'''GBNDVI''': self.gbndvi,
'''GRNDVI''': self.grndvi,
'''RBNDVI''': self.rbndvi,
'''PNDVI''': self.pndvi,
'''ATSAVI''': self.atsavi,
'''BWDRVI''': self.bwdrvi,
'''CIgreen''': self.ci_green,
'''CIrededge''': self.ci_rededge,
'''CI''': self.ci,
'''CTVI''': self.ctvi,
'''GDVI''': self.gdvi,
'''EVI''': self.evi,
'''GEMI''': self.gemi,
'''GOSAVI''': self.gosavi,
'''GSAVI''': self.gsavi,
'''Hue''': self.hue,
'''IVI''': self.ivi,
'''IPVI''': self.ipvi,
'''I''': self.i,
'''RVI''': self.rvi,
'''MRVI''': self.mrvi,
'''MSAVI''': self.m_savi,
'''NormG''': self.norm_g,
'''NormNIR''': self.norm_nir,
'''NormR''': self.norm_r,
'''NGRDI''': self.ngrdi,
'''RI''': self.ri,
'''S''': self.s,
'''IF''': self._if,
'''DVI''': self.dvi,
'''TVI''': self.tvi,
'''NDRE''': self.ndre,
}
try:
return funcs[index]()
except KeyError:
print('''Index not in the list!''')
return False
def UpperCAmelCase ( self) -> List[Any]:
'''simple docstring'''
return -0.18 + (1.17 * ((self.nir - self.red) / (self.nir + self.red)))
def UpperCAmelCase ( self) -> Any:
'''simple docstring'''
return ((self.nir - self.redEdge) / (self.nir + self.redEdge)) / (
(self.nir - self.red) / (self.nir + self.red)
)
def UpperCAmelCase ( self) -> Optional[int]:
'''simple docstring'''
return self.nir * (self.red / (self.green**2))
def UpperCAmelCase ( self) -> str:
'''simple docstring'''
return (2 * self.green - self.red - self.blue) / (
2 * self.green + self.red + self.blue
)
def UpperCAmelCase ( self) -> List[str]:
'''simple docstring'''
return (self.nir - self.red) / (self.nir + self.red)
def UpperCAmelCase ( self) -> str:
'''simple docstring'''
return (self.nir - self.blue) / (self.nir + self.blue)
def UpperCAmelCase ( self) -> List[Any]:
'''simple docstring'''
return (self.redEdge - self.red) / (self.redEdge + self.red)
def UpperCAmelCase ( self) -> Optional[int]:
'''simple docstring'''
return (self.nir - self.green) / (self.nir + self.green)
def UpperCAmelCase ( self) -> Optional[Any]:
'''simple docstring'''
return (self.nir - (self.green + self.blue)) / (
self.nir + (self.green + self.blue)
)
def UpperCAmelCase ( self) -> Tuple:
'''simple docstring'''
return (self.nir - (self.green + self.red)) / (
self.nir + (self.green + self.red)
)
def UpperCAmelCase ( self) -> List[Any]:
'''simple docstring'''
return (self.nir - (self.blue + self.red)) / (self.nir + (self.blue + self.red))
def UpperCAmelCase ( self) -> List[str]:
'''simple docstring'''
return (self.nir - (self.green + self.red + self.blue)) / (
self.nir + (self.green + self.red + self.blue)
)
    def UpperCAmelCase ( self , x=0.08 , a=1.22 , b=0.03) -> Optional[Any]:
'''simple docstring'''
return a * (
(self.nir - a * self.red - b)
/ (a * self.nir + self.red - a * b + x * (1 + a**2))
)
def UpperCAmelCase ( self) -> Dict:
'''simple docstring'''
return (0.1 * self.nir - self.blue) / (0.1 * self.nir + self.blue)
def UpperCAmelCase ( self) -> List[str]:
'''simple docstring'''
return (self.nir / self.green) - 1
def UpperCAmelCase ( self) -> List[Any]:
'''simple docstring'''
return (self.nir / self.redEdge) - 1
def UpperCAmelCase ( self) -> Union[str, Any]:
'''simple docstring'''
return (self.red - self.blue) / self.red
def UpperCAmelCase ( self) -> Optional[int]:
'''simple docstring'''
        ndvi = self.ndvi()
return ((ndvi + 0.5) / (abs(ndvi + 0.5))) * (abs(ndvi + 0.5) ** (1 / 2))
def UpperCAmelCase ( self) -> Optional[int]:
'''simple docstring'''
return self.nir - self.green
def UpperCAmelCase ( self) -> List[str]:
'''simple docstring'''
return 2.5 * (
(self.nir - self.red) / (self.nir + 6 * self.red - 7.5 * self.blue + 1)
)
def UpperCAmelCase ( self) -> Tuple:
'''simple docstring'''
        n = (2 * (self.nir**2 - self.red**2) + 1.5 * self.nir + 0.5 * self.red) / (
self.nir + self.red + 0.5
)
return n * (1 - 0.25 * n) - (self.red - 0.125) / (1 - self.red)
    def UpperCAmelCase ( self , y=0.16) -> Optional[Any]:
'''simple docstring'''
return (self.nir - self.green) / (self.nir + self.green + y)
    def UpperCAmelCase ( self , n=0.5) -> Dict:
'''simple docstring'''
return ((self.nir - self.green) / (self.nir + self.green + n)) * (1 + n)
def UpperCAmelCase ( self) -> Dict:
'''simple docstring'''
return np.arctan(
((2 * self.red - self.green - self.blue) / 30.5) * (self.green - self.blue))
    def UpperCAmelCase ( self , a=None , b=None) -> Any:
        '''simple docstring'''
        return (self.nir - b) / (a * self.red)
def UpperCAmelCase ( self) -> Optional[Any]:
'''simple docstring'''
return (self.nir / ((self.nir + self.red) / 2)) * (self.ndvi() + 1)
def UpperCAmelCase ( self) -> Optional[Any]:
'''simple docstring'''
return (self.red + self.green + self.blue) / 30.5
def UpperCAmelCase ( self) -> Any:
'''simple docstring'''
return self.nir / self.red
def UpperCAmelCase ( self) -> Tuple:
'''simple docstring'''
return (self.rvi() - 1) / (self.rvi() + 1)
def UpperCAmelCase ( self) -> List[Any]:
'''simple docstring'''
return (
(2 * self.nir + 1)
- ((2 * self.nir + 1) ** 2 - 8 * (self.nir - self.red)) ** (1 / 2)
) / 2
def UpperCAmelCase ( self) -> Optional[int]:
'''simple docstring'''
return self.green / (self.nir + self.red + self.green)
def UpperCAmelCase ( self) -> str:
'''simple docstring'''
return self.nir / (self.nir + self.red + self.green)
def UpperCAmelCase ( self) -> Optional[int]:
'''simple docstring'''
return self.red / (self.nir + self.red + self.green)
def UpperCAmelCase ( self) -> Tuple:
'''simple docstring'''
return (self.green - self.red) / (self.green + self.red)
def UpperCAmelCase ( self) -> Dict:
'''simple docstring'''
return (self.red - self.green) / (self.red + self.green)
def UpperCAmelCase ( self) -> Any:
'''simple docstring'''
        max_value = np.max([np.max(self.red), np.max(self.green), np.max(self.blue)])
        min_value = np.min([np.min(self.red), np.min(self.green), np.min(self.blue)])
return (max_value - min_value) / max_value
def UpperCAmelCase ( self) -> str:
'''simple docstring'''
return (2 * self.red - self.green - self.blue) / (self.green - self.blue)
def UpperCAmelCase ( self) -> int:
'''simple docstring'''
return self.nir / self.red
def UpperCAmelCase ( self) -> Any:
'''simple docstring'''
return (self.ndvi() + 0.5) ** (1 / 2)
def UpperCAmelCase ( self) -> Union[str, Any]:
'''simple docstring'''
return (self.nir - self.redEdge) / (self.nir + self.redEdge)
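# Quick numeric check of the NDVI formula above, (nir - red) / (nir + red), on
# hypothetical reflectance values (not part of the original module):
if __name__ == "__main__":
    red = np.array([0.10, 0.20])
    nir = np.array([0.50, 0.60])
    print((nir - red) / (nir + red))  # [0.66666667 0.5       ]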
| 19 | 1 |
"""simple docstring"""
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
| 19 |
"""simple docstring"""
import inspect
import unittest
from transformers import ViTHybridConfig
from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel
from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class _UpperCAmelCase:
def __init__( self , __a , __a=13 , __a=64 , __a=2 , __a=3 , __a=True , __a=True , __a=32 , __a=5 , __a=4 , __a=37 , __a="gelu" , __a=0.1 , __a=0.1 , __a=10 , __a=0.02 , __a=[1, 16, 4, 4] , __a=None , ) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase = parent
_UpperCamelCase = batch_size
_UpperCamelCase = image_size
_UpperCamelCase = patch_size
_UpperCamelCase = num_channels
_UpperCamelCase = is_training
_UpperCamelCase = use_labels
_UpperCamelCase = hidden_size
_UpperCamelCase = num_hidden_layers
_UpperCamelCase = num_attention_heads
_UpperCamelCase = intermediate_size
_UpperCamelCase = hidden_act
_UpperCamelCase = hidden_dropout_prob
_UpperCamelCase = attention_probs_dropout_prob
_UpperCamelCase = type_sequence_label_size
_UpperCamelCase = initializer_range
_UpperCamelCase = scope
_UpperCamelCase = backbone_featmap_shape
# in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
# the number of patches is based on the feature map of the backbone, which by default uses an output stride
# of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size
_UpperCamelCase = (self.image_size // 32) ** 2
_UpperCamelCase = num_patches + 1
def UpperCAmelCase ( self) -> int:
'''simple docstring'''
_UpperCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
_UpperCamelCase = None
if self.use_labels:
_UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size)
_UpperCamelCase = self.get_config()
return config, pixel_values, labels
def UpperCAmelCase ( self) -> Optional[int]:
'''simple docstring'''
_UpperCamelCase = {
'''global_padding''': '''same''',
'''layer_type''': '''bottleneck''',
'''depths''': [3, 4, 9],
'''out_features''': ['''stage1''', '''stage2''', '''stage3'''],
'''embedding_dynamic_padding''': True,
'''hidden_sizes''': [4, 8, 16, 32],
'''num_groups''': 2,
}
return ViTHybridConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__a , initializer_range=self.initializer_range , backbone_featmap_shape=self.backbone_featmap_shape , backbone_config=__a , )
def UpperCAmelCase ( self , __a , __a , __a) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase = ViTHybridModel(config=__a)
model.to(__a)
model.eval()
_UpperCamelCase = model(__a)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def UpperCAmelCase ( self , __a , __a , __a) -> Union[str, Any]:
'''simple docstring'''
_UpperCamelCase = self.type_sequence_label_size
_UpperCamelCase = ViTHybridForImageClassification(__a)
model.to(__a)
model.eval()
_UpperCamelCase = model(__a , labels=__a)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
def UpperCAmelCase ( self) -> List[Any]:
'''simple docstring'''
_UpperCamelCase = self.prepare_config_and_inputs()
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = config_and_inputs
_UpperCamelCase = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class _UpperCAmelCase( lowerCamelCase , lowerCamelCase , unittest.TestCase ):
lowercase__ = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else ()
lowercase__ = (
{'feature-extraction': ViTHybridModel, 'image-classification': ViTHybridForImageClassification}
if is_torch_available()
else {}
)
lowercase__ = False
lowercase__ = False
lowercase__ = False
def UpperCAmelCase ( self) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase = ViTHybridModelTester(self)
        _UpperCamelCase = ConfigTester(self , config_class=ViTHybridConfig , has_text_modality=False , hidden_size=37)
def UpperCAmelCase ( self) -> Any:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='''ViT does not use inputs_embeds''')
def UpperCAmelCase ( self) -> Dict:
'''simple docstring'''
pass
def UpperCAmelCase ( self) -> Optional[int]:
'''simple docstring'''
_UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCamelCase = model_class(__a)
self.assertIsInstance(model.get_input_embeddings() , (nn.Module))
_UpperCamelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__a , nn.Linear))
def UpperCAmelCase ( self) -> Tuple:
'''simple docstring'''
_UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCamelCase = model_class(__a)
_UpperCamelCase = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCamelCase = [*signature.parameters.keys()]
_UpperCamelCase = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , __a)
def UpperCAmelCase ( self) -> str:
'''simple docstring'''
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a)
def UpperCAmelCase ( self) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__a)
def UpperCAmelCase ( self) -> int:
'''simple docstring'''
_UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCamelCase = _config_zero_init(__a)
for model_class in self.all_model_classes:
_UpperCamelCase = model_class(config=__a)
# Skip the check for the backbone
for name, module in model.named_modules():
if module.__class__.__name__ == "ViTHybridPatchEmbeddings":
                    backbone_params = [F'''{name}.{key}''' for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@slow
def UpperCAmelCase ( self) -> Optional[Any]:
'''simple docstring'''
for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCamelCase = ViTHybridModel.from_pretrained(__a)
self.assertIsNotNone(__a)
def lowerCamelCase__ ( ) -> Tuple:
"""simple docstring"""
_UpperCamelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class _UpperCAmelCase( unittest.TestCase ):
@cached_property
def UpperCAmelCase ( self) -> Tuple:
'''simple docstring'''
return (
ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0])
if is_vision_available()
else None
)
@slow
def UpperCAmelCase ( self) -> Dict:
'''simple docstring'''
_UpperCamelCase = ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(
__a)
_UpperCamelCase = self.default_image_processor
_UpperCamelCase = prepare_img()
_UpperCamelCase = image_processor(images=__a , return_tensors='''pt''').to(__a)
# forward pass
with torch.no_grad():
_UpperCamelCase = model(**__a)
# verify the logits
_UpperCamelCase = torch.Size((1, 10_00))
self.assertEqual(outputs.logits.shape , __a)
_UpperCamelCase = torch.tensor([-1.9090, -0.4993, -0.2389]).to(__a)
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __a , atol=1e-4))
@slow
@require_accelerate
def UpperCAmelCase ( self) -> Any:
'''simple docstring'''
_UpperCamelCase = ViTHybridImageProcessor.from_pretrained('''google/vit-hybrid-base-bit-384''')
_UpperCamelCase = ViTHybridForImageClassification.from_pretrained('''google/vit-hybrid-base-bit-384''' , device_map='''auto''')
_UpperCamelCase = prepare_img()
_UpperCamelCase = image_processor(images=__a , return_tensors='''pt''')
_UpperCamelCase = model(**__a)
_UpperCamelCase = outputs.logits
# model predicts one of the 1000 ImageNet classes
_UpperCamelCase = logits.argmax(-1).item()
        self.assertEqual(model.config.id2label[predicted_class_idx] , '''tabby, tabby cat''')
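# A minimal end-to-end inference sketch mirroring the integration test above;
# the checkpoint name and image path are the ones the test itself uses.
from PIL import Image
from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor

processor = ViTHybridImageProcessor.from_pretrained("google/vit-hybrid-base-bit-384")
model = ViTHybridForImageClassification.from_pretrained("google/vit-hybrid-base-bit-384")
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = processor(images=image, return_tensors="pt")
logits = model(**inputs).logits
print(model.config.id2label[logits.argmax(-1).item()])  # e.g. "tabby, tabby cat"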
| 19 | 1 |
"""simple docstring"""
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNetaDConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class _UpperCAmelCase( lowerCamelCase ):
lowercase__ = ['vqvae']
def __init__( self , __a , __a , __a , __a , ) -> List[str]:
'''simple docstring'''
super().__init__()
self.register_modules(unet=__a , scheduler=__a , mel=__a , vqvae=__a)
def UpperCAmelCase ( self) -> int:
'''simple docstring'''
        return 50 if isinstance(self.scheduler , DDIMScheduler) else 10_00
@torch.no_grad()
def __call__( self , __a = 1 , __a = None , __a = None , __a = 0 , __a = 0 , __a = None , __a = None , __a = 0 , __a = 0 , __a = None , __a = 0 , __a = None , __a = None , __a=True , ) -> Union[
Union[AudioPipelineOutput, ImagePipelineOutput],
Tuple[List[Image.Image], Tuple[int, List[np.ndarray]]],
]:
'''simple docstring'''
_UpperCamelCase = steps or self.get_default_steps()
self.scheduler.set_timesteps(__a)
_UpperCamelCase = step_generator or generator
# For backwards compatibility
if type(self.unet.config.sample_size) == int:
_UpperCamelCase = (self.unet.config.sample_size, self.unet.config.sample_size)
if noise is None:
_UpperCamelCase = randn_tensor(
(
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size[0],
self.unet.config.sample_size[1],
) , generator=__a , device=self.device , )
_UpperCamelCase = noise
_UpperCamelCase = None
if audio_file is not None or raw_audio is not None:
self.mel.load_audio(__a , __a)
_UpperCamelCase = self.mel.audio_slice_to_image(__a)
_UpperCamelCase = np.frombuffer(input_image.tobytes() , dtype='''uint8''').reshape(
(input_image.height, input_image.width))
_UpperCamelCase = (input_image / 2_55) * 2 - 1
_UpperCamelCase = torch.tensor(input_image[np.newaxis, :, :] , dtype=torch.float).to(self.device)
if self.vqvae is not None:
_UpperCamelCase = self.vqvae.encode(torch.unsqueeze(__a , 0)).latent_dist.sample(
generator=__a)[0]
_UpperCamelCase = self.vqvae.config.scaling_factor * input_images
if start_step > 0:
_UpperCamelCase = self.scheduler.add_noise(__a , __a , self.scheduler.timesteps[start_step - 1])
_UpperCamelCase = (
self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
)
_UpperCamelCase = int(mask_start_secs * pixels_per_second)
_UpperCamelCase = int(mask_end_secs * pixels_per_second)
_UpperCamelCase = self.scheduler.add_noise(__a , __a , torch.tensor(self.scheduler.timesteps[start_step:]))
for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:])):
            if isinstance(self.unet , UNetaDConditionModel):
_UpperCamelCase = self.unet(__a , __a , __a)['''sample''']
else:
_UpperCamelCase = self.unet(__a , __a)['''sample''']
            if isinstance(self.scheduler , DDIMScheduler):
_UpperCamelCase = self.scheduler.step(
model_output=__a , timestep=__a , sample=__a , eta=__a , generator=__a , )['''prev_sample''']
else:
_UpperCamelCase = self.scheduler.step(
model_output=__a , timestep=__a , sample=__a , generator=__a , )['''prev_sample''']
if mask is not None:
if mask_start > 0:
_UpperCamelCase = mask[:, step, :, :mask_start]
if mask_end > 0:
_UpperCamelCase = mask[:, step, :, -mask_end:]
if self.vqvae is not None:
# 0.18215 was scaling factor used in training to ensure unit variance
_UpperCamelCase = 1 / self.vqvae.config.scaling_factor * images
_UpperCamelCase = self.vqvae.decode(__a)['''sample''']
_UpperCamelCase = (images / 2 + 0.5).clamp(0 , 1)
_UpperCamelCase = images.cpu().permute(0 , 2 , 3 , 1).numpy()
_UpperCamelCase = (images * 2_55).round().astype('''uint8''')
_UpperCamelCase = list(
(Image.fromarray(_[:, :, 0]) for _ in images)
if images.shape[3] == 1
            else (Image.fromarray(_ , mode='''RGB''').convert('''L''') for _ in images))
_UpperCamelCase = [self.mel.image_to_audio(__a) for _ in images]
if not return_dict:
return images, (self.mel.get_sample_rate(), audios)
return BaseOutput(**AudioPipelineOutput(np.array(__a)[:, np.newaxis, :]) , **ImagePipelineOutput(__a))
@torch.no_grad()
def UpperCAmelCase ( self , __a , __a = 50) -> np.ndarray:
'''simple docstring'''
        assert isinstance(self.scheduler , DDIMScheduler)
self.scheduler.set_timesteps(__a)
_UpperCamelCase = np.array(
[np.frombuffer(image.tobytes() , dtype='''uint8''').reshape((1, image.height, image.width)) for image in images])
_UpperCamelCase = (sample / 2_55) * 2 - 1
_UpperCamelCase = torch.Tensor(__a).to(self.device)
for t in self.progress_bar(torch.flip(self.scheduler.timesteps , (0,))):
_UpperCamelCase = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
_UpperCamelCase = self.scheduler.alphas_cumprod[t]
_UpperCamelCase = (
self.scheduler.alphas_cumprod[prev_timestep]
if prev_timestep >= 0
else self.scheduler.final_alpha_cumprod
)
_UpperCamelCase = 1 - alpha_prod_t
_UpperCamelCase = self.unet(__a , __a)['''sample''']
_UpperCamelCase = (1 - alpha_prod_t_prev) ** 0.5 * model_output
_UpperCamelCase = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
_UpperCamelCase = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output
return sample
@staticmethod
    def UpperCAmelCase ( xa , xb , alpha) -> torch.Tensor:
        '''simple docstring'''
        theta = acos(torch.dot(torch.flatten(xa) , torch.flatten(xb)) / torch.norm(xa) / torch.norm(xb))
        return sin((1 - alpha) * theta) * xa / sin(theta) + sin(alpha * theta) * xb / sin(theta)
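# A hypothetical usage sketch for a pipeline like the one above; the checkpoint
# name is an assumption (a community audio-diffusion model), not from this file.
import torch
from diffusers import AudioDiffusionPipeline

pipe = AudioDiffusionPipeline.from_pretrained("teticio/audio-diffusion-256")
output = pipe(generator=torch.Generator().manual_seed(42))
image = output.images[0]   # mel spectrogram rendered as a PIL image
audio = output.audios[0]   # decoded waveform for that spectrogram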
| 19 |
"""simple docstring"""
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNetaDConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class _UpperCAmelCase( lowerCamelCase ):
lowercase__ = ['vqvae']
def __init__( self , __a , __a , __a , __a , ) -> List[str]:
'''simple docstring'''
super().__init__()
self.register_modules(unet=__a , scheduler=__a , mel=__a , vqvae=__a)
def UpperCAmelCase ( self) -> int:
'''simple docstring'''
return 50 if isinstance(self.scheduler , __a) else 10_00
@torch.no_grad()
def __call__( self , __a = 1 , __a = None , __a = None , __a = 0 , __a = 0 , __a = None , __a = None , __a = 0 , __a = 0 , __a = None , __a = 0 , __a = None , __a = None , __a=True , ) -> Union[
Union[AudioPipelineOutput, ImagePipelineOutput],
Tuple[List[Image.Image], Tuple[int, List[np.ndarray]]],
]:
'''simple docstring'''
_UpperCamelCase = steps or self.get_default_steps()
self.scheduler.set_timesteps(__a)
_UpperCamelCase = step_generator or generator
# For backwards compatibility
if type(self.unet.config.sample_size) == int:
_UpperCamelCase = (self.unet.config.sample_size, self.unet.config.sample_size)
if noise is None:
_UpperCamelCase = randn_tensor(
(
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size[0],
self.unet.config.sample_size[1],
) , generator=__a , device=self.device , )
_UpperCamelCase = noise
_UpperCamelCase = None
if audio_file is not None or raw_audio is not None:
self.mel.load_audio(__a , __a)
_UpperCamelCase = self.mel.audio_slice_to_image(__a)
_UpperCamelCase = np.frombuffer(input_image.tobytes() , dtype='''uint8''').reshape(
(input_image.height, input_image.width))
_UpperCamelCase = (input_image / 2_55) * 2 - 1
_UpperCamelCase = torch.tensor(input_image[np.newaxis, :, :] , dtype=torch.float).to(self.device)
if self.vqvae is not None:
_UpperCamelCase = self.vqvae.encode(torch.unsqueeze(__a , 0)).latent_dist.sample(
generator=__a)[0]
_UpperCamelCase = self.vqvae.config.scaling_factor * input_images
if start_step > 0:
_UpperCamelCase = self.scheduler.add_noise(__a , __a , self.scheduler.timesteps[start_step - 1])
_UpperCamelCase = (
self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
)
_UpperCamelCase = int(mask_start_secs * pixels_per_second)
_UpperCamelCase = int(mask_end_secs * pixels_per_second)
_UpperCamelCase = self.scheduler.add_noise(__a , __a , torch.tensor(self.scheduler.timesteps[start_step:]))
for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:])):
if isinstance(self.unet , __a):
_UpperCamelCase = self.unet(__a , __a , __a)['''sample''']
else:
_UpperCamelCase = self.unet(__a , __a)['''sample''']
if isinstance(self.scheduler , __a):
_UpperCamelCase = self.scheduler.step(
model_output=__a , timestep=__a , sample=__a , eta=__a , generator=__a , )['''prev_sample''']
else:
_UpperCamelCase = self.scheduler.step(
model_output=__a , timestep=__a , sample=__a , generator=__a , )['''prev_sample''']
if mask is not None:
if mask_start > 0:
_UpperCamelCase = mask[:, step, :, :mask_start]
if mask_end > 0:
_UpperCamelCase = mask[:, step, :, -mask_end:]
if self.vqvae is not None:
# 0.18215 was scaling factor used in training to ensure unit variance
_UpperCamelCase = 1 / self.vqvae.config.scaling_factor * images
_UpperCamelCase = self.vqvae.decode(__a)['''sample''']
_UpperCamelCase = (images / 2 + 0.5).clamp(0 , 1)
_UpperCamelCase = images.cpu().permute(0 , 2 , 3 , 1).numpy()
_UpperCamelCase = (images * 2_55).round().astype('''uint8''')
_UpperCamelCase = list(
(Image.fromarray(_[:, :, 0]) for _ in images)
if images.shape[3] == 1
else (Image.fromarray(__a , mode='''RGB''').convert('''L''') for _ in images))
_UpperCamelCase = [self.mel.image_to_audio(__a) for _ in images]
if not return_dict:
return images, (self.mel.get_sample_rate(), audios)
return BaseOutput(**AudioPipelineOutput(np.array(__a)[:, np.newaxis, :]) , **ImagePipelineOutput(__a))
@torch.no_grad()
def UpperCAmelCase ( self , __a , __a = 50) -> np.ndarray:
'''simple docstring'''
assert isinstance(self.scheduler , __a)
self.scheduler.set_timesteps(__a)
_UpperCamelCase = np.array(
[np.frombuffer(image.tobytes() , dtype='''uint8''').reshape((1, image.height, image.width)) for image in images])
_UpperCamelCase = (sample / 2_55) * 2 - 1
_UpperCamelCase = torch.Tensor(__a).to(self.device)
for t in self.progress_bar(torch.flip(self.scheduler.timesteps , (0,))):
_UpperCamelCase = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
_UpperCamelCase = self.scheduler.alphas_cumprod[t]
_UpperCamelCase = (
self.scheduler.alphas_cumprod[prev_timestep]
if prev_timestep >= 0
else self.scheduler.final_alpha_cumprod
)
_UpperCamelCase = 1 - alpha_prod_t
_UpperCamelCase = self.unet(__a , __a)['''sample''']
_UpperCamelCase = (1 - alpha_prod_t_prev) ** 0.5 * model_output
_UpperCamelCase = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
_UpperCamelCase = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output
return sample
@staticmethod
def UpperCAmelCase ( __a , __a , __a) -> torch.Tensor:
'''simple docstring'''
_UpperCamelCase = acos(torch.dot(torch.flatten(__a) , torch.flatten(__a)) / torch.norm(__a) / torch.norm(__a))
return sin((1 - alpha) * theta) * xa / sin(__a) + sin(alpha * theta) * xa / sin(__a)
| 19 | 1 |
"""simple docstring"""
from __future__ import annotations
def lowerCamelCase__ ( nums ) -> bool:
    """simple docstring"""
    if len(nums ) < 2:
        raise ValueError('''Monogons and Digons are not polygons in the Euclidean space''' )
    if any(i <= 0 for i in nums ):
        raise ValueError('''All values must be greater than 0''' )
    copy_nums = nums.copy()
    copy_nums.sort()
    return copy_nums[-1] < sum(copy_nums[:-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
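    # Illustrative calls for the polygon check above; the results follow from
    # the rule that the longest side must be strictly shorter than the sum of
    # the remaining sides.
    assert lowerCamelCase__([3, 4, 5]) is True   # 5 < 3 + 4
    assert lowerCamelCase__([3, 4, 8]) is False  # 8 >= 3 + 4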
| 19 |
"""simple docstring"""
import copy
from collections import OrderedDict
from typing import Dict, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
_a = logging.get_logger(__name__)
_a = {
"""facebook/detr-resnet-50""": """https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json""",
# See all DETR models at https://huggingface.co/models?filter=detr
}
class _UpperCAmelCase( lowerCamelCase ):
lowercase__ = 'detr'
lowercase__ = ['past_key_values']
lowercase__ = {
'hidden_size': 'd_model',
'num_attention_heads': 'encoder_attention_heads',
}
def __init__( self , __a=True , __a=None , __a=3 , __a=1_00 , __a=6 , __a=20_48 , __a=8 , __a=6 , __a=20_48 , __a=8 , __a=0.0 , __a=0.0 , __a=True , __a="relu" , __a=2_56 , __a=0.1 , __a=0.0 , __a=0.0 , __a=0.02 , __a=1.0 , __a=False , __a="sine" , __a="resnet50" , __a=True , __a=False , __a=1 , __a=5 , __a=2 , __a=1 , __a=1 , __a=5 , __a=2 , __a=0.1 , **__a , ) -> int:
'''simple docstring'''
if backbone_config is not None and use_timm_backbone:
raise ValueError('''You can\'t specify both `backbone_config` and `use_timm_backbone`.''')
if not use_timm_backbone:
if backbone_config is None:
logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''')
_UpperCamelCase = CONFIG_MAPPING['''resnet'''](out_features=['''stage4'''])
elif isinstance(__a , __a):
_UpperCamelCase = backbone_config.get('''model_type''')
_UpperCamelCase = CONFIG_MAPPING[backbone_model_type]
_UpperCamelCase = config_class.from_dict(__a)
# set timm attributes to None
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = None, None, None
_UpperCamelCase = use_timm_backbone
_UpperCamelCase = backbone_config
_UpperCamelCase = num_channels
_UpperCamelCase = num_queries
_UpperCamelCase = d_model
_UpperCamelCase = encoder_ffn_dim
_UpperCamelCase = encoder_layers
_UpperCamelCase = encoder_attention_heads
_UpperCamelCase = decoder_ffn_dim
_UpperCamelCase = decoder_layers
_UpperCamelCase = decoder_attention_heads
_UpperCamelCase = dropout
_UpperCamelCase = attention_dropout
_UpperCamelCase = activation_dropout
_UpperCamelCase = activation_function
_UpperCamelCase = init_std
_UpperCamelCase = init_xavier_std
_UpperCamelCase = encoder_layerdrop
_UpperCamelCase = decoder_layerdrop
_UpperCamelCase = encoder_layers
_UpperCamelCase = auxiliary_loss
_UpperCamelCase = position_embedding_type
_UpperCamelCase = backbone
_UpperCamelCase = use_pretrained_backbone
_UpperCamelCase = dilation
# Hungarian matcher
_UpperCamelCase = class_cost
_UpperCamelCase = bbox_cost
_UpperCamelCase = giou_cost
# Loss coefficients
_UpperCamelCase = mask_loss_coefficient
_UpperCamelCase = dice_loss_coefficient
_UpperCamelCase = bbox_loss_coefficient
_UpperCamelCase = giou_loss_coefficient
_UpperCamelCase = eos_coefficient
super().__init__(is_encoder_decoder=__a , **__a)
@property
def UpperCAmelCase ( self) -> int:
'''simple docstring'''
return self.encoder_attention_heads
@property
def UpperCAmelCase ( self) -> int:
'''simple docstring'''
return self.d_model
@classmethod
def UpperCAmelCase ( cls , __a , **__a) -> int:
'''simple docstring'''
return cls(backbone_config=__a , **__a)
def UpperCAmelCase ( self) -> Dict[str, any]:
'''simple docstring'''
_UpperCamelCase = copy.deepcopy(self.__dict__)
if output["backbone_config"] is not None:
_UpperCamelCase = self.backbone_config.to_dict()
_UpperCamelCase = self.__class__.model_type
return output
class _UpperCAmelCase( lowerCamelCase ):
lowercase__ = version.parse('1.11' )
@property
def UpperCAmelCase ( self) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
('''pixel_mask''', {0: '''batch'''}),
])
@property
def UpperCAmelCase ( self) -> float:
'''simple docstring'''
return 1e-5
@property
def UpperCAmelCase ( self) -> int:
'''simple docstring'''
return 12
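# A minimal usage sketch; DetrConfig is the public transformers class that the
# configuration above mirrors, and the keyword names come from the signature above.
from transformers import DetrConfig

config = DetrConfig(num_queries=100, d_model=256, encoder_layers=6, decoder_layers=6)
# attribute_map above aliases these onto encoder_attention_heads / d_model:
print(config.num_attention_heads, config.hidden_size)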
| 19 | 1 |
"""simple docstring"""
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
class _UpperCAmelCase( unittest.TestCase , lowerCamelCase ):
def UpperCAmelCase ( self) -> List[Any]:
'''simple docstring'''
_UpperCamelCase = load_tool('''text-classification''')
self.tool.setup()
_UpperCamelCase = load_tool('''text-classification''' , remote=__a)
def UpperCAmelCase ( self) -> List[Any]:
'''simple docstring'''
_UpperCamelCase = self.tool('''That\'s quite cool''' , ['''positive''', '''negative'''])
self.assertEqual(__a , '''positive''')
def UpperCAmelCase ( self) -> int:
'''simple docstring'''
_UpperCamelCase = self.remote_tool('''That\'s quite cool''' , ['''positive''', '''negative'''])
self.assertEqual(__a , '''positive''')
def UpperCAmelCase ( self) -> Dict:
'''simple docstring'''
_UpperCamelCase = self.tool(text='''That\'s quite cool''' , labels=['''positive''', '''negative'''])
self.assertEqual(__a , '''positive''')
def UpperCAmelCase ( self) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase = self.remote_tool(text='''That\'s quite cool''' , labels=['''positive''', '''negative'''])
self.assertEqual(__a , '''positive''')
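# A standalone sketch of what the tests above exercise, using the same tool
# name and labels as the test (requires the transformers agents extras):
from transformers import load_tool

classifier = load_tool("text-classification")
classifier.setup()
print(classifier("That's quite cool", ["positive", "negative"]))  # -> "positive"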
| 19 |
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_a = logging.get_logger(__name__)
_a = {
"""microsoft/wavlm-base""": """https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json""",
# See all WavLM models at https://huggingface.co/models?filter=wavlm
}
class _UpperCAmelCase( lowerCamelCase ):
lowercase__ = 'wavlm'
def __init__( self , __a=32 , __a=7_68 , __a=12 , __a=12 , __a=30_72 , __a="gelu" , __a=0.1 , __a=0.1 , __a=0.1 , __a=0.0 , __a=0.1 , __a=0.1 , __a=0.02 , __a=1e-5 , __a="group" , __a="gelu" , __a=(5_12, 5_12, 5_12, 5_12, 5_12, 5_12, 5_12) , __a=(5, 2, 2, 2, 2, 2, 2) , __a=(10, 3, 3, 3, 3, 2, 2) , __a=False , __a=1_28 , __a=16 , __a=3_20 , __a=8_00 , __a=False , __a=True , __a=0.05 , __a=10 , __a=2 , __a=0.0 , __a=10 , __a=3_20 , __a=2 , __a=0.1 , __a=1_00 , __a=2_56 , __a=2_56 , __a=0.1 , __a="mean" , __a=False , __a=False , __a=2_56 , __a=(5_12, 5_12, 5_12, 5_12, 15_00) , __a=(5, 3, 3, 1, 1) , __a=(1, 2, 3, 1, 1) , __a=5_12 , __a=80 , __a=0 , __a=1 , __a=2 , __a=False , __a=3 , __a=2 , __a=3 , __a=None , **__a , ) -> Union[str, Any]:
'''simple docstring'''
super().__init__(**__a , pad_token_id=__a , bos_token_id=__a , eos_token_id=__a)
_UpperCamelCase = hidden_size
_UpperCamelCase = feat_extract_norm
_UpperCamelCase = feat_extract_activation
_UpperCamelCase = list(__a)
_UpperCamelCase = list(__a)
_UpperCamelCase = list(__a)
_UpperCamelCase = conv_bias
_UpperCamelCase = num_buckets
_UpperCamelCase = max_bucket_distance
_UpperCamelCase = num_conv_pos_embeddings
_UpperCamelCase = num_conv_pos_embedding_groups
_UpperCamelCase = len(self.conv_dim)
_UpperCamelCase = num_hidden_layers
_UpperCamelCase = intermediate_size
_UpperCamelCase = hidden_act
_UpperCamelCase = num_attention_heads
_UpperCamelCase = hidden_dropout
_UpperCamelCase = attention_dropout
_UpperCamelCase = activation_dropout
_UpperCamelCase = feat_proj_dropout
_UpperCamelCase = final_dropout
_UpperCamelCase = layerdrop
_UpperCamelCase = layer_norm_eps
_UpperCamelCase = initializer_range
_UpperCamelCase = num_ctc_classes
_UpperCamelCase = vocab_size
_UpperCamelCase = do_stable_layer_norm
_UpperCamelCase = use_weighted_layer_sum
_UpperCamelCase = classifier_proj_size
if (
(len(self.conv_stride) != self.num_feat_extract_layers)
or (len(self.conv_kernel) != self.num_feat_extract_layers)
or (len(self.conv_dim) != self.num_feat_extract_layers)
):
raise ValueError(
'''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='''
''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='''
F''' {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,'''
F''' `len(config.conv_kernel) = {len(self.conv_kernel)}`.''')
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
_UpperCamelCase = apply_spec_augment
_UpperCamelCase = mask_time_prob
_UpperCamelCase = mask_time_length
_UpperCamelCase = mask_time_min_masks
_UpperCamelCase = mask_feature_prob
_UpperCamelCase = mask_feature_length
# parameters for pretraining with codevector quantized representations
_UpperCamelCase = num_codevectors_per_group
_UpperCamelCase = num_codevector_groups
_UpperCamelCase = contrastive_logits_temperature
_UpperCamelCase = num_negatives
_UpperCamelCase = codevector_dim
_UpperCamelCase = proj_codevector_dim
_UpperCamelCase = diversity_loss_weight
# ctc loss
_UpperCamelCase = ctc_loss_reduction
_UpperCamelCase = ctc_zero_infinity
# adapter
_UpperCamelCase = add_adapter
_UpperCamelCase = adapter_kernel_size
_UpperCamelCase = adapter_stride
_UpperCamelCase = num_adapter_layers
_UpperCamelCase = output_hidden_size or hidden_size
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
_UpperCamelCase = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
_UpperCamelCase = list(__a)
_UpperCamelCase = list(__a)
_UpperCamelCase = list(__a)
_UpperCamelCase = xvector_output_dim
@property
def UpperCAmelCase ( self) -> Optional[Any]:
'''simple docstring'''
return functools.reduce(operator.mul , self.conv_stride , 1)
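# The property above multiplies the feature-extractor conv strides together;
# with the default strides from the signature, one output frame covers 320
# input samples:
import functools
import operator

print(functools.reduce(operator.mul, (5, 2, 2, 2, 2, 2, 2), 1))  # -> 320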
| 19 | 1 |
"""simple docstring"""
from __future__ import annotations
class Node:
    def __init__( self , data) -> None:
        '''simple docstring'''
        self.data = data
        self.left = None
        self.right = None
def display( tree ) -> None: # In Order traversal of the tree
    """simple docstring"""
    if tree:
        display(tree.left )
        print(tree.data )
        display(tree.right )
def depth_of_tree( tree ) -> int:
    """simple docstring"""
    return 1 + max(depth_of_tree(tree.left ), depth_of_tree(tree.right ) ) if tree else 0
def is_full_binary_tree( tree ) -> bool:
    """simple docstring"""
    if not tree:
        return True
    if tree.left and tree.right:
        return is_full_binary_tree(tree.left ) and is_full_binary_tree(tree.right )
    else:
        return not tree.left and not tree.right
def main() -> None: # Main function for testing.
    """simple docstring"""
    # NOTE: the attribute targets of these assignments were lost in this copy;
    # the tree shape below is an assumed reconstruction that exercises every
    # helper above.
    tree = Node(1 )
    tree.left = Node(2 )
    tree.right = Node(3 )
    tree.left.left = Node(4 )
    tree.left.right = Node(5 )
    tree.left.right.left = Node(6 )
    tree.right.left = Node(7 )
    tree.right.left.left = Node(8 )
    tree.right.left.left.right = Node(9 )
    print(is_full_binary_tree(tree ) )
    print(depth_of_tree(tree ) )
    print('''Tree is: ''' )
    display(tree )
if __name__ == "__main__":
main()
| 19 |
"""simple docstring"""
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from elia_utils import (
embed_questions_for_retrieval,
make_qa_sas_model,
qa_sas_generate,
query_es_index,
query_qa_dense_index,
)
import transformers
from transformers import AutoModel, AutoModelForSeqaSeqLM, AutoTokenizer
_a = """bart"""
_a = True
@st.cache(allow_output_mutation=__snake_case )
def lowerCamelCase__ ( ) -> Dict:
"""simple docstring"""
if LOAD_DENSE_INDEX:
_UpperCamelCase = AutoTokenizer.from_pretrained('''yjernite/retribert-base-uncased''' )
_UpperCamelCase = AutoModel.from_pretrained('''yjernite/retribert-base-uncased''' ).to('''cuda:0''' )
_UpperCamelCase = qar_model.eval()
else:
_UpperCamelCase , _UpperCamelCase = (None, None)
if MODEL_TYPE == "bart":
_UpperCamelCase = AutoTokenizer.from_pretrained('''yjernite/bart_eli5''' )
_UpperCamelCase = AutoModelForSeqaSeqLM.from_pretrained('''yjernite/bart_eli5''' ).to('''cuda:0''' )
_UpperCamelCase = torch.load('''seq2seq_models/eli5_bart_model_blm_2.pth''' )
sas_model.load_state_dict(save_dict['''model'''] )
_UpperCamelCase = sas_model.eval()
else:
_UpperCamelCase , _UpperCamelCase = make_qa_sas_model(
model_name='''t5-small''', from_file='''seq2seq_models/eli5_t5_model_1024_4.pth''', device='''cuda:0''' )
return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)
@st.cache(allow_output_mutation=__snake_case )
def lowerCamelCase__ ( ) -> Tuple:
"""simple docstring"""
if LOAD_DENSE_INDEX:
_UpperCamelCase = faiss.StandardGpuResources()
_UpperCamelCase = datasets.load_dataset(path='''wiki_snippets''', name='''wiki40b_en_100_0''' )['''train''']
_UpperCamelCase = np.memmap(
'''wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat''', dtype='''float32''', mode='''r''', shape=(wikiaab_passages.num_rows, 1_28), )
_UpperCamelCase = faiss.IndexFlatIP(1_28 )
_UpperCamelCase = faiss.index_cpu_to_gpu(__snake_case, 1, __snake_case )
wikiaab_gpu_index_flat.add(__snake_case ) # TODO fix for larger GPU
else:
_UpperCamelCase , _UpperCamelCase = (None, None)
_UpperCamelCase = Elasticsearch([{'''host''': '''localhost''', '''port''': '''9200'''}] )
return (wikiaab_passages, wikiaab_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=__snake_case )
def lowerCamelCase__ ( ) -> int:
"""simple docstring"""
_UpperCamelCase = datasets.load_dataset('''eli5''', name='''LFQA_reddit''' )
_UpperCamelCase = elia['''train_eli5''']
_UpperCamelCase = np.memmap(
'''eli5_questions_reps.dat''', dtype='''float32''', mode='''r''', shape=(elia_train.num_rows, 1_28) )
_UpperCamelCase = faiss.IndexFlatIP(1_28 )
eli5_train_q_index.add(__snake_case )
return (elia_train, eli5_train_q_index)
_a , _a , _a = load_indexes()
_a , _a , _a , _a = load_models()
_a , _a = load_train_data()
def lowerCamelCase__ ( __snake_case, __snake_case=10 ) -> List[Any]:
"""simple docstring"""
_UpperCamelCase = embed_questions_for_retrieval([question], __snake_case, __snake_case )
_UpperCamelCase , _UpperCamelCase = eli5_train_q_index.search(__snake_case, __snake_case )
_UpperCamelCase = [elia_train[int(__snake_case )] for i in I[0]]
return nn_examples
def lowerCamelCase__ ( __snake_case, __snake_case="wiki40b", __snake_case="dense", __snake_case=10 ) -> List[str]:
"""simple docstring"""
if source == "none":
_UpperCamelCase , _UpperCamelCase = (''' <P> '''.join(['''''' for _ in range(11 )] ).strip(), [])
else:
if method == "dense":
_UpperCamelCase , _UpperCamelCase = query_qa_dense_index(
__snake_case, __snake_case, __snake_case, __snake_case, __snake_case, __snake_case )
else:
_UpperCamelCase , _UpperCamelCase = query_es_index(
__snake_case, __snake_case, index_name='''english_wiki40b_snippets_100w''', n_results=__snake_case, )
_UpperCamelCase = [
(res['''article_title'''], res['''section_title'''].strip(), res['''score'''], res['''passage_text''']) for res in hit_lst
]
_UpperCamelCase = '''question: {} context: {}'''.format(__snake_case, __snake_case )
return question_doc, support_list
@st.cache(
hash_funcs={
torch.Tensor: (lambda __snake_case : None),
transformers.models.bart.tokenization_bart.BartTokenizer: (lambda __snake_case : None),
} )
def lowerCamelCase__ ( __snake_case, __snake_case, __snake_case, __snake_case=64, __snake_case=2_56, __snake_case=False, __snake_case=2, __snake_case=0.95, __snake_case=0.8 ) -> Dict:
"""simple docstring"""
with torch.no_grad():
_UpperCamelCase = qa_sas_generate(
__snake_case, __snake_case, __snake_case, num_answers=1, num_beams=__snake_case, min_len=__snake_case, max_len=__snake_case, do_sample=__snake_case, temp=__snake_case, top_p=__snake_case, top_k=__snake_case, max_input_length=10_24, device='''cuda:0''', )[0]
return (answer, support_list)
st.title("""Long Form Question Answering with ELI5""")
# Start sidebar
_a = """<img src='https://huggingface.co/front/assets/huggingface_logo.svg'>"""
_a = """
<html>
<head>
<style>
.img-container {
padding-left: 90px;
padding-right: 90px;
padding-top: 50px;
padding-bottom: 50px;
background-color: #f0f3f9;
}
</style>
</head>
<body>
<span class=\"img-container\"> <!-- Inline parent element -->
%s
</span>
</body>
</html>
""" % (
header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
_a = """
This demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).
First, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,
a pre-processed fixed snapshot of Wikipedia.
"""
st.sidebar.markdown(description, unsafe_allow_html=True)
_a = [
"""Answer the question""",
"""View the retrieved document only""",
"""View the most similar ELI5 question and answer""",
"""Show me everything, please!""",
]
_a = st.sidebar.checkbox("""Demo options""")
if demo_options:
_a = st.sidebar.selectbox(
"""""",
action_list,
index=3,
)
_a = action_list.index(action_st)
_a = st.sidebar.selectbox(
"""""",
["""Show full text of passages""", """Show passage section titles"""],
index=0,
)
_a = show_type == """Show full text of passages"""
else:
_a = 3
_a = True
_a = st.sidebar.checkbox("""Retrieval options""")
if retrieval_options:
_a = """
### Information retriever options
The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding
trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.
The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.
"""
st.sidebar.markdown(retriever_info)
_a = st.sidebar.selectbox("""Which Wikipedia format should the model use?""", ["""wiki40b""", """none"""])
_a = st.sidebar.selectbox("""Which Wikipedia indexer should the model use?""", ["""dense""", """sparse""", """mixed"""])
else:
_a = """wiki40b"""
_a = """dense"""
_a = """beam"""
_a = 2
_a = 64
_a = 256
_a = None
_a = None
_a = st.sidebar.checkbox("""Generation options""")
if generate_options:
_a = """
### Answer generation options
The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)
weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with
**beam** search, or **sample** from the decoder's output probabilities.
"""
st.sidebar.markdown(generate_info)
_a = st.sidebar.selectbox("""Would you like to use beam search or sample an answer?""", ["""beam""", """sampled"""])
_a = st.sidebar.slider(
"""Minimum generation length""", min_value=8, max_value=256, value=64, step=8, format=None, key=None
)
_a = st.sidebar.slider(
"""Maximum generation length""", min_value=64, max_value=512, value=256, step=16, format=None, key=None
)
if sampled == "beam":
_a = st.sidebar.slider("""Beam size""", min_value=1, max_value=8, value=2, step=None, format=None, key=None)
else:
_a = st.sidebar.slider(
"""Nucleus sampling p""", min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
)
_a = st.sidebar.slider(
"""Temperature""", min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
)
_a = None
# start main text
_a = [
"""<MY QUESTION>""",
"""How do people make chocolate?""",
"""Why do we get a fever when we are sick?""",
"""How can different animals perceive different colors?""",
"""What is natural language processing?""",
"""What's the best way to treat a sunburn?""",
"""What exactly are vitamins ?""",
"""How does nuclear energy provide electricity?""",
"""What's the difference between viruses and bacteria?""",
"""Why are flutes classified as woodwinds when most of them are made out of metal ?""",
"""Why do people like drinking coffee even though it tastes so bad?""",
"""What happens when wine ages? How does it make the wine taste better?""",
"""If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?""",
"""How can we set a date to the beginning or end of an artistic period? Doesn't the change happen gradually?""",
"""How does New Zealand have so many large bird predators?""",
]
_a = st.selectbox(
"""What would you like to ask? ---- select <MY QUESTION> to enter a new query""",
questions_list,
index=1,
)
if question_s == "<MY QUESTION>":
_a = st.text_input("""Enter your question here:""", """""")
else:
_a = question_s
if st.button("""Show me!"""):
if action in [0, 1, 3]:
if index_type == "mixed":
_a , _a = make_support(question, source=wiki_source, method="""dense""", n_results=10)
_a , _a = make_support(question, source=wiki_source, method="""sparse""", n_results=10)
_a = []
for res_d, res_s in zip(support_list_dense, support_list_sparse):
if tuple(res_d) not in support_list:
support_list += [tuple(res_d)]
if tuple(res_s) not in support_list:
support_list += [tuple(res_s)]
_a = support_list[:10]
_a = """<P> """ + """ <P> """.join([res[-1] for res in support_list])
else:
_a , _a = make_support(question, source=wiki_source, method=index_type, n_results=10)
if action in [0, 3]:
_a , _a = answer_question(
question_doc,
sas_model,
sas_tokenizer,
min_len=min_len,
max_len=int(max_len),
sampling=(sampled == """sampled"""),
n_beams=n_beams,
top_p=top_p,
temp=temp,
)
st.markdown("""### The model generated answer is:""")
st.write(answer)
if action in [0, 1, 3] and wiki_source != "none":
st.markdown("""--- \n ### The model is drawing information from the following Wikipedia passages:""")
for i, res in enumerate(support_list):
_a = """https://en.wikipedia.org/wiki/{}""".format(res[0].replace(""" """, """_"""))
_a = res[1].strip()
if sec_titles == "":
_a = """[{}]({})""".format(res[0], wiki_url)
else:
_a = sec_titles.split(""" & """)
_a = """ & """.join(
["""[{}]({}#{})""".format(sec.strip(), wiki_url, sec.strip().replace(""" """, """_""")) for sec in sec_list]
)
st.markdown(
"""{0:02d} - **Article**: {1:<18} <br> _Section_: {2}""".format(i + 1, res[0], sections),
unsafe_allow_html=True,
)
if show_passages:
st.write(
"""> <span style=\"font-family:arial; font-size:10pt;\">""" + res[-1] + """</span>""", unsafe_allow_html=True
)
if action in [2, 3]:
_a = find_nearest_training(question)
_a = nn_train_list[0]
st.markdown(
"""--- \n ### The most similar question in the ELI5 training set was: \n\n {}""".format(train_exple["""title"""])
)
_a = [
"""{}. {}""".format(i + 1, """ \n""".join([line.strip() for line in ans.split("""\n""") if line.strip() != """"""]))
for i, (ans, sc) in enumerate(zip(train_exple["""answers"""]["""text"""], train_exple["""answers"""]["""score"""]))
if i == 0 or sc > 2
]
st.markdown("""##### Its answers were: \n\n {}""".format("""\n""".join(answers_st)))
_a = """
---
**Disclaimer**
*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.
Evaluating biases of such a model and ensuring factual generations are still very much open research problems.
Therefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*
"""
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
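# A minimal sketch of the dense-retrieval step used above: a faiss
# inner-product index over 128-d embeddings (array sizes are illustrative).
import faiss
import numpy as np

index = faiss.IndexFlatIP(128)
index.add(np.random.rand(1000, 128).astype("float32"))
scores, ids = index.search(np.random.rand(1, 128).astype("float32"), 10)  # top-10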
| 19 | 1 |
"""simple docstring"""
import baseaa
import io
import json
import os
from copy import deepcopy
from ..optimizer import AcceleratedOptimizer
from ..scheduler import AcceleratedScheduler
class _UpperCAmelCase:
def __init__( self , __a) -> Optional[int]:
'''simple docstring'''
if isinstance(__a , __a):
# Don't modify user's data should they want to reuse it (e.g. in tests), because once we
# modified it, it will not be accepted here again, since `auto` values would have been overridden
_UpperCamelCase = deepcopy(__a)
elif os.path.exists(__a):
with io.open(__a , '''r''' , encoding='''utf-8''') as f:
_UpperCamelCase = json.load(__a)
else:
try:
_UpperCamelCase = baseaa.urlsafe_baadecode(__a).decode('''utf-8''')
_UpperCamelCase = json.loads(__a)
except (UnicodeDecodeError, AttributeError, ValueError):
raise ValueError(
F'''Expected a string path to an existing deepspeed config, or a dictionary, or a base64 encoded string. Received: {config_file_or_dict}''')
_UpperCamelCase = config
self.set_stage_and_offload()
def UpperCAmelCase ( self) -> str:
'''simple docstring'''
# zero stage - this is done as early as possible, before model is created, to allow
# ``is_deepspeed_zero3_enabled`` query and getting to the early deepspeed config object
# during ``zero.Init()`` which needs to know the dtype, and some other hparams.
_UpperCamelCase = self.get_value('''zero_optimization.stage''' , -1)
# offload
_UpperCamelCase = False
if self.is_zeroa() or self.is_zeroa():
            _UpperCamelCase = {'''cpu''', '''nvme'''}
            _UpperCamelCase = {
                self.get_value('''zero_optimization.offload_optimizer.device'''),
                self.get_value('''zero_optimization.offload_param.device'''),
            }
if len(offload_devices & offload_devices_valid) > 0:
_UpperCamelCase = True
def UpperCAmelCase ( self , __a) -> Tuple:
'''simple docstring'''
_UpperCamelCase = self.config
# find the config node of interest if it exists
_UpperCamelCase = ds_key_long.split('''.''')
_UpperCamelCase = nodes.pop()
for node in nodes:
_UpperCamelCase = config.get(__a)
if config is None:
return None, ds_key
return config, ds_key
def UpperCAmelCase ( self , __a , __a=None) -> Dict:
'''simple docstring'''
_UpperCamelCase , _UpperCamelCase = self.find_config_node(__a)
if config is None:
return default
return config.get(__a , __a)
def UpperCAmelCase ( self , __a , __a=False) -> int:
'''simple docstring'''
_UpperCamelCase = self.config
# find the config node of interest if it exists
_UpperCamelCase = ds_key_long.split('''.''')
for node in nodes:
_UpperCamelCase = config
_UpperCamelCase = config.get(__a)
if config is None:
if must_exist:
raise ValueError(F'''Can\'t find {ds_key_long} entry in the config: {self.config}''')
else:
return
# if found remove it
if parent_config is not None:
parent_config.pop(__a)
def UpperCAmelCase ( self , __a) -> Union[str, Any]:
'''simple docstring'''
_UpperCamelCase = self.get_value(__a)
return False if value is None else bool(__a)
def UpperCAmelCase ( self , __a) -> List[Any]:
'''simple docstring'''
_UpperCamelCase = self.get_value(__a)
return False if value is None else not bool(__a)
def UpperCAmelCase ( self) -> List[Any]:
'''simple docstring'''
return self._stage == 2
def UpperCAmelCase ( self) -> int:
'''simple docstring'''
return self._stage == 3
def UpperCAmelCase ( self) -> str:
'''simple docstring'''
return self._offload
class _UpperCAmelCase:
def __init__( self , __a) -> Dict:
'''simple docstring'''
_UpperCamelCase = engine
def UpperCAmelCase ( self , __a , **__a) -> List[str]:
'''simple docstring'''
# runs backpropagation and handles mixed precision
self.engine.backward(__a , **__a)
# Deepspeed's `engine.step` performs the following operations:
# - gradient accumulation check
# - gradient clipping
# - optimizer step
# - zero grad
# - checking overflow
# - lr_scheduler step (only if engine.lr_scheduler is not None)
self.engine.step()
# and this plugin overrides the above calls with no-ops when Accelerate runs under
# Deepspeed, but allows normal functionality for non-Deepspeed cases thus enabling a simple
# training loop that works transparently under many training regimes.
class _UpperCAmelCase( lowerCamelCase ):
def __init__( self , __a) -> List[Any]:
'''simple docstring'''
super().__init__(__a , device_placement=__a , scaler=__a)
_UpperCamelCase = hasattr(self.optimizer , '''overflow''')
def UpperCAmelCase ( self , __a=None) -> Tuple:
'''simple docstring'''
pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
def UpperCAmelCase ( self) -> str:
'''simple docstring'''
pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
@property
def UpperCAmelCase ( self) -> Optional[Any]:
'''simple docstring'''
if self.__has_overflow__:
return self.optimizer.overflow
return False
class _UpperCAmelCase( lowerCamelCase ):
def __init__( self , __a , __a) -> Dict:
'''simple docstring'''
super().__init__(__a , __a)
def UpperCAmelCase ( self) -> str:
'''simple docstring'''
pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
class _UpperCAmelCase:
def __init__( self , __a , __a=0.001 , __a=0 , **__a) -> Tuple:
'''simple docstring'''
_UpperCamelCase = params
_UpperCamelCase = lr
_UpperCamelCase = weight_decay
_UpperCamelCase = kwargs
class _UpperCAmelCase:
def __init__( self , __a , __a=None , __a=0 , **__a) -> List[str]:
'''simple docstring'''
_UpperCamelCase = optimizer
_UpperCamelCase = total_num_steps
_UpperCamelCase = warmup_num_steps
_UpperCamelCase = kwargs
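# A standalone sketch of the dotted-path lookup that get_value/find_config_node
# implement above (the function name is mine):
def get_nested(config, ds_key_long, default=None):
    node = config
    for key in ds_key_long.split("."):
        if not isinstance(node, dict) or key not in node:
            return default
        node = node[key]
    return node

# get_nested({"zero_optimization": {"stage": 3}}, "zero_optimization.stage") -> 3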
| 19 |
"""simple docstring"""
import argparse
import fairseq
import torch
from torch import nn
from transformers import (
MBartaaTokenizer,
MBartConfig,
MBartForCausalLM,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
_a = logging.get_logger(__name__)
_a = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """lm_head""",
"""mask_emb""": """masked_spec_embed""",
}
_a = [
"""lm_head""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
]
def lowerCamelCase__ ( __snake_case, __snake_case, __snake_case, __snake_case, __snake_case ) -> Tuple:
"""simple docstring"""
for attribute in key.split('''.''' ):
_UpperCamelCase = getattr(__snake_case, __snake_case )
if weight_type is not None:
_UpperCamelCase = getattr(__snake_case, __snake_case ).shape
else:
_UpperCamelCase = hf_pointer.shape
assert hf_shape == value.shape, (
F'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
F''' {value.shape} for {full_name}'''
)
if weight_type == "weight":
_UpperCamelCase = value
elif weight_type == "weight_g":
_UpperCamelCase = value
elif weight_type == "weight_v":
_UpperCamelCase = value
elif weight_type == "bias":
_UpperCamelCase = value
else:
_UpperCamelCase = value
logger.info(F'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''' )
def lowerCamelCase__ ( __snake_case, __snake_case ) -> List[str]:
"""simple docstring"""
_UpperCamelCase = []
_UpperCamelCase = fairseq_model.state_dict()
_UpperCamelCase = hf_model.feature_extractor
_UpperCamelCase = hf_model.adapter
for name, value in fairseq_dict.items():
_UpperCamelCase = False
if "conv_layers" in name:
load_conv_layer(
__snake_case, __snake_case, __snake_case, __snake_case, hf_model.config.feat_extract_norm == '''group''', )
_UpperCamelCase = True
elif any(x in name for x in ['''adaptor''', '''w2v_encoder.proj.''', '''w2v_proj_ln.'''] ):
load_adapter(__snake_case, __snake_case, __snake_case, __snake_case )
_UpperCamelCase = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
_UpperCamelCase = True
if "*" in mapped_key:
_UpperCamelCase = name.split(__snake_case )[0].split('''.''' )[-2]
_UpperCamelCase = mapped_key.replace('''*''', __snake_case )
if "weight_g" in name:
_UpperCamelCase = '''weight_g'''
elif "weight_v" in name:
_UpperCamelCase = '''weight_v'''
elif "bias" in name:
_UpperCamelCase = '''bias'''
elif "weight" in name:
_UpperCamelCase = '''weight'''
else:
_UpperCamelCase = None
set_recursively(__snake_case, __snake_case, __snake_case, __snake_case, __snake_case )
continue
if not is_used:
unused_weights.append(__snake_case )
logger.warning(F'''Unused weights: {unused_weights}''' )
def lowerCamelCase__ ( __snake_case, __snake_case, __snake_case, __snake_case, __snake_case ) -> List[Any]:
"""simple docstring"""
_UpperCamelCase = full_name.split('''conv_layers.''' )[-1]
_UpperCamelCase = name.split('''.''' )
_UpperCamelCase = int(items[0] )
_UpperCamelCase = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
)
_UpperCamelCase = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
)
_UpperCamelCase = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'''
" found."
)
_UpperCamelCase = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'''
)
_UpperCamelCase = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(__snake_case )
def lowerCamelCase__ ( __snake_case, __snake_case, __snake_case, __snake_case ) -> Dict:
"""simple docstring"""
_UpperCamelCase = full_name.split('''adaptor.''' )[-1]
_UpperCamelCase = name.split('''.''' )
if items[1].isdigit():
_UpperCamelCase = int(items[1] )
else:
_UpperCamelCase = None
if "adaptor" not in full_name:
if "proj_ln" in full_name:
# has to be layer norm
if "bias" in name:
assert (
value.shape == adapter.proj_layer_norm.bias.data.shape
), F'''{full_name} has size {value.shape}, but {adapter.proj_layer_norm.bias.data.shape} was found.'''
_UpperCamelCase = value
logger.info(F'''Adapter proj layer norm bias was initialized from {full_name}.''' )
if "weight" in name:
assert (
value.shape == adapter.proj_layer_norm.weight.data.shape
), F'''{full_name} has size {value.shape}, but {adapter.proj_layer_norm.weight.data.shape} was found.'''
_UpperCamelCase = value
else:
# has to be projection layer
if "bias" in name:
assert (
value.shape == adapter.proj.bias.data.shape
), F'''{full_name} has size {value.shape}, but {adapter.proj.bias.data.shape} was found.'''
_UpperCamelCase = value
logger.info(F'''Adapter proj layer bias was initialized from {full_name}.''' )
if "weight" in name:
assert (
value.shape == adapter.proj.weight.data.shape
), F'''{full_name} has size {value.shape}, but {adapter.proj.weight.data.shape} was found.'''
_UpperCamelCase = value
logger.info(F'''Adapter proj layer weight was initialized from {full_name}.''' )
elif isinstance(__snake_case, __snake_case ):
if "bias" in name:
assert (
value.shape == adapter.layers[layer_id].conv.bias.data.shape
), F'''{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.bias.data.shape} was found.'''
_UpperCamelCase = value
logger.info(F'''Adapter layer {layer_id} bias was initialized from {full_name}.''' )
elif "weight" in name:
assert (
value.shape == adapter.layers[layer_id].conv.weight.data.shape
), F'''{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.weight.data.shape} was found.'''
_UpperCamelCase = value
            logger.info(F'''Adapter layer {layer_id} weight was initialized from {full_name}.''' )
else:
unused_weights.append(__snake_case )
def lowerCamelCase__ ( emb ) -> nn.Linear:
    """simple docstring"""
    vocab_size , emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False )
    lin_layer.weight.data = emb.weight.data
    return lin_layer
@torch.no_grad()
def convert_wavaveca_checkpoint(
    checkpoint_path,
    pytorch_dump_folder_path,
    dict_path,
    config_yaml_path,
    encoder_config_path,
    decoder_config_path,
    add_adapter,
    adapter_kernel_size,
    adapter_stride,
    decoder_start_token_id,
    encoder_output_dim,
):
    """Copy/paste/tweak a fairseq wav2vec2 + mBART checkpoint into a SpeechEncoderDecoderModel."""
    # load configs
    encoder_config = WavaVecaConfig.from_pretrained(
        encoder_config_path,
        add_adapter=add_adapter,
        adapter_stride=adapter_stride,
        adapter_kernel_size=adapter_kernel_size,
        use_auth_token=True,
        output_hidden_size=encoder_output_dim,
    )
    decoder_config = MBartConfig.from_pretrained(decoder_config_path)

    # load model
    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path],
        arg_overrides={
            "config_yaml": config_yaml_path,
            "data": "/".join(dict_path.split("/")[:-1]),
            "w2v_path": checkpoint_path,
            "load_pretrained_decoder_from": None,
        },
    )
    model = model[0].eval()

    # load feature extractor
    feature_extractor = WavaVecaFeatureExtractor.from_pretrained(encoder_config_path, use_auth_token=True)

    # set weights for wav2vec2 encoder
    hf_encoder = WavaVecaModel(encoder_config)

    recursively_load_weights_wavaveca(model.encoder, hf_encoder)

    # load decoder weights
    hf_decoder = MBartForCausalLM(decoder_config)
    missing_keys, unexpected_keys = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict(), strict=False)
    logger.warning(f"The following keys are missing when loading the decoder weights: {missing_keys}")
    logger.warning(f"The following keys are unexpected when loading the decoder weights: {unexpected_keys}")

    hf_wavavec = SpeechEncoderDecoderModel(encoder=hf_encoder, decoder=hf_decoder)
    hf_wavavec.config.tie_word_embeddings = False

    tokenizer = MBartaaTokenizer(dict_path)
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    config = hf_wavavec.config.to_dict()
    config["pad_token_id"] = tokenizer.pad_token_id
    config["bos_token_id"] = tokenizer.bos_token_id
    config["eos_token_id"] = tokenizer.eos_token_id
    config["tokenizer_class"] = "mbart50"
    config["feature_extractor_type"] = "wav2vec2"
    config["decoder_start_token_id"] = tokenizer.eos_token_id
    config["forced_bos_token_id"] = 250004
    config["forced_eos_token_id"] = tokenizer.eos_token_id

    hf_wavavec.config = SpeechEncoderDecoderConfig.from_dict(config)
    hf_wavavec.save_pretrained(pytorch_dump_folder_path)
    feature_extractor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
_a = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_yaml_path""", default=None, type=str, help="""Path to yaml file of fine-tuned model""")
parser.add_argument(
"""--encoder_config_path""",
default="""facebook/wav2vec2-xls-r-1b""",
type=str,
help="""Path to hf encoder wav2vec2 checkpoint config""",
)
parser.add_argument(
"""--decoder_config_path""",
default="""facebook/mbart-large-50-one-to-many-mmt""",
type=str,
help="""Path to hf decoder checkpoint config""",
)
parser.add_argument("""--add_adapter""", default=True, type=bool, help="""whethere to add model adapter layers""")
parser.add_argument("""--adapter_stride""", default=2, type=int, help="""stride of adapter layers""")
parser.add_argument("""--adapter_kernel_size""", default=3, type=int, help="""kernel size of adapter layers""")
parser.add_argument("""--encoder_output_dim""", default=1024, type=int, help="""encoder output dim""")
parser.add_argument("""--start_token_id""", default=25_0004, type=int, help="""`decoder_start_token_id` of model config""")
_a = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
args.config_yaml_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
add_adapter=args.add_adapter,
adapter_kernel_size=args.adapter_kernel_size,
adapter_stride=args.adapter_stride,
decoder_start_token_id=args.start_token_id,
encoder_output_dim=args.encoder_output_dim,
)
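# Example invocation (a sketch; the script name and all paths are placeholders):
#
#   python convert_mbart_wav2vec2_checkpoint.py \
#       --checkpoint_path /path/to/checkpoint_best.pt \
#       --dict_path /path/to/dict.txt \
#       --config_yaml_path /path/to/config.yaml \
#       --pytorch_dump_folder_path ./wav2vec2-mbart50-output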
| 19 | 1 |
"""simple docstring"""
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class ImageTextProcessor(ProcessorMixin):
    """Wraps an image processor and a tokenizer into a single processor."""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "AutoImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor, tokenizer):
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        """Forward all arguments to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward all arguments to the tokenizer's decode."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "attention_mask", "pixel_values"]
| 19 |
"""simple docstring"""
import fire
from utils import calculate_rouge, save_json
def calculate_rouge_path(pred_path, tgt_path, save_path=None, **kwargs):
    """Kwargs will be passed to calculate_rouge."""
    pred_lns = [x.strip() for x in open(pred_path).readlines()]
    tgt_lns = [x.strip() for x in open(tgt_path).readlines()][: len(pred_lns)]
    metrics = calculate_rouge(pred_lns, tgt_lns, **kwargs)
    if save_path is not None:
        save_json(metrics, save_path, indent=None)
    return metrics  # these print nicely
if __name__ == "__main__":
fire.Fire(calculate_rouge_path)
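# Example CLI call through fire (file names are placeholders; each file holds
# one summary per line):
#
#   python rouge_cli.py predictions.txt references.txt --save_path rouge.json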
| 19 | 1 |
"""simple docstring"""
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
    from transformers import AutoTokenizer, FlaxMT5ForConditionalGeneration
    from transformers.models.t5.modeling_flax_t5 import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class MT5IntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        """
        Scores a short input/target pair and compares the result against a
        reference value produced with the original mesh-tensorflow implementation.
        """
        model = FlaxMT5ForConditionalGeneration.from_pretrained("google/mt5-small")
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="np").input_ids
        labels = tokenizer("Hi I am", return_tensors="np").input_ids

        decoder_input_ids = shift_tokens_right(labels, model.config.pad_token_id, model.config.decoder_start_token_id)

        logits = model(input_ids, decoder_input_ids=decoder_input_ids).logits
        loss = optax.softmax_cross_entropy(logits, onehot(labels, logits.shape[-1])).mean()

        mtf_score = -(labels.shape[-1] * loss.item())

        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
| 19 |
"""simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class CLIPSegProcessor(ProcessorMixin):
    """Combines a ViT image processor and a CLIP tokenizer into a single processor."""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, visual_prompt=None, return_tensors=None, **kwargs):
        if text is None and visual_prompt is None and images is None:
            raise ValueError("You have to specify either text, visual prompt or images.")

        if text is not None and visual_prompt is not None:
            raise ValueError("You have to specify exactly one type of prompt. Either text or visual prompt.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if visual_prompt is not None:
            prompt_features = self.image_processor(visual_prompt, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if visual_prompt is not None and images is not None:
            encoding = {
                "pixel_values": image_features.pixel_values,
                "conditional_pixel_values": prompt_features.pixel_values,
            }
            return encoding
        elif text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        elif visual_prompt is not None:
            encoding = {
                "conditional_pixel_values": prompt_features.pixel_values,
            }
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        """Forward all arguments to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward all arguments to the tokenizer's decode."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
| 19 | 1 |
"""simple docstring"""
import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class LightningModel(pl.LightningModule):
    def __init__(self, model):
        super().__init__()
        self.model = model
        self.num_labels = 2
        self.qa_outputs = nn.Linear(self.model.config.hidden_size, self.num_labels)

    # implemented only because Lightning requires a forward method
    def forward(self):
        pass


def convert_longformer_qa_checkpoint_to_pytorch(
    longformer_model, longformer_question_answering_ckpt_path, pytorch_dump_folder_path
):
    """Transfer weights from a Lightning QA checkpoint into LongformerForQuestionAnswering."""
    # load longformer model from model identifier
    longformer = LongformerModel.from_pretrained(longformer_model)
    lightning_model = LightningModel(longformer)

    ckpt = torch.load(longformer_question_answering_ckpt_path, map_location=torch.device("cpu"))
    lightning_model.load_state_dict(ckpt["state_dict"])

    # init longformer question answering model
    longformer_for_qa = LongformerForQuestionAnswering.from_pretrained(longformer_model)

    # transfer weights
    longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict())
    longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict())
    longformer_for_qa.eval()

    # save model
    longformer_for_qa.save_pretrained(pytorch_dump_folder_path)

    print(f"Conversion successful. Model saved under {pytorch_dump_folder_path}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--longformer_model""",
default=None,
type=str,
required=True,
help="""model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.""",
)
parser.add_argument(
"""--longformer_question_answering_ckpt_path""",
default=None,
type=str,
required=True,
help="""Path the official PyTorch Lightning Checkpoint.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
    args = parser.parse_args()
convert_longformer_qa_checkpoint_to_pytorch(
args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
)
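# Example invocation (a sketch; the checkpoint path is a placeholder):
#
#   python convert_longformer_qa.py \
#       --longformer_model longformer-base-4096 \
#       --longformer_question_answering_ckpt_path ./qa_checkpoint.ckpt \
#       --pytorch_dump_folder_path ./longformer-qa-output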
| 19 |
"""simple docstring"""
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import UperNetForSemanticSegmentation
from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UperNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        num_channels=3,
        num_stages=4,
        hidden_sizes=[10, 20, 30, 40],
        depths=[2, 2, 3, 2],
        is_training=True,
        use_labels=True,
        intermediate_size=37,
        hidden_act="gelu",
        type_sequence_label_size=10,
        initializer_range=0.02,
        out_features=["stage2", "stage3", "stage4"],
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.num_labels = num_labels
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_backbone_config(self):
        return ConvNextConfig(
            num_channels=self.num_channels,
            num_stages=self.num_stages,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            is_training=self.is_training,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            out_features=self.out_features,
        )

    def get_config(self):
        return UperNetConfig(
            backbone_config=self.get_backbone_config(),
            hidden_size=512,
            pool_scales=[1, 2, 3, 6],
            use_auxiliary_head=True,
            auxiliary_loss_weight=0.4,
            auxiliary_in_channels=40,
            auxiliary_channels=256,
            auxiliary_num_convs=1,
            auxiliary_concat_input=False,
            loss_ignore_index=255,
            num_labels=self.num_labels,
        )

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels):
        model = UperNetForSemanticSegmentation(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size)
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class UperNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (UperNetForSemanticSegmentation,) if is_torch_available() else ()
    pipeline_model_mapping = {"image-segmentation": UperNetForSemanticSegmentation} if is_torch_available() else {}
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_torchscript = False
    has_attentions = False
    def setUp(self):
        self.model_tester = UperNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=UperNetConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    @unittest.skip(reason="UperNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="UperNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="UperNet does not have a base model")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(reason="UperNet does not have a base model")
    def test_save_load_fast_init_to_base(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(reason="UperNet has some layers using `add_module` which doesn't work well with `nn.DataParallel`")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_initialization(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        configs_no_init.backbone_config = _config_zero_init(configs_no_init.backbone_config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    @unittest.skip(reason="UperNet does not have tied weights")
    def test_tied_model_weights_key_ignore(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = UperNetForSemanticSegmentation.from_pretrained(model_name)
            self.assertIsNotNone(model)
# We will verify our results on an image from the ADE20k dataset
def prepare_img():
    filepath = hf_hub_download(
        repo_id="hf-internal-testing/fixtures_ade20k", repo_type="dataset", filename="ADE_val_00000001.jpg"
    )
    image = Image.open(filepath).convert("RGB")
    return image
@require_torch
@require_vision
@slow
class UperNetModelIntegrationTest(unittest.TestCase):
    def test_inference_swin_backbone(self):
        processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-swin-tiny")
        model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-swin-tiny").to(torch_device)

        image = prepare_img()
        inputs = processor(images=image, return_tensors="pt").to(torch_device)

        with torch.no_grad():
            outputs = model(**inputs)

        expected_shape = torch.Size((1, model.config.num_labels, 512, 512))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4))

    def test_inference_convnext_backbone(self):
        processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny")
        model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny").to(torch_device)

        image = prepare_img()
        inputs = processor(images=image, return_tensors="pt").to(torch_device)

        with torch.no_grad():
            outputs = model(**inputs)

        expected_shape = torch.Size((1, model.config.num_labels, 512, 512))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4))
| 19 | 1 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..utils import cached_file
# docstyle-ignore
_a = """
Human: <<task>>
Assistant: """
_a = """huggingface-tools/default-prompts"""
_a = {"""chat""": """chat_prompt_template.txt""", """run""": """run_prompt_template.txt"""}
def lowerCamelCase__ ( __snake_case, __snake_case, __snake_case="run" ) -> Any:
"""simple docstring"""
if prompt_or_repo_id is None:
_UpperCamelCase = DEFAULT_PROMPTS_REPO
# prompt is considered a repo ID when it does not contain any kind of space
if re.search('''\\s''', __snake_case ) is not None:
return prompt_or_repo_id
_UpperCamelCase = cached_file(
__snake_case, PROMPT_FILES[mode], repo_type='''dataset''', user_agent={'''agent''': agent_name} )
with open(__snake_case, '''r''', encoding='''utf-8''' ) as f:
return f.read()
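# Usage sketch: with the defaults this resolves to the "run" template of the
# huggingface-tools/default-prompts dataset repo (agent_name is illustrative):
#
#   template = download_prompt(None, agent_name="my-agent")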
| 19 |
"""simple docstring"""
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class DDPMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDPMScheduler,)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "variance_type": "fixed_small",
            "clip_sample": True,
        }

        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_variance_type(self):
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True,
                    prediction_type=prediction_type,
                    sample_max_value=threshold,
                )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [0, 500, 999]:
            self.check_over_forward(time_step=t)

    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.02)) < 1e-5

    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            # sample = pred_prev_sample + variance
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 258.9606) < 1e-2
        assert abs(result_mean.item() - 0.3372) < 1e-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 202.0296) < 1e-2
        assert abs(result_mean.item() - 0.2631) < 1e-3

    def test_custom_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]

        scheduler.set_timesteps(timesteps=timesteps)

        scheduler_timesteps = scheduler.timesteps

        for i, timestep in enumerate(scheduler_timesteps):
            if i == len(scheduler_timesteps) - 1:
                expected_prev_t = -1
            else:
                expected_prev_t = timesteps[i + 1]

            prev_t = scheduler.previous_timestep(timestep)
            prev_t = prev_t.item()

            self.assertEqual(prev_t, expected_prev_t)

    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 51, 0]

        with self.assertRaises(ValueError, msg="`custom_timesteps` must be in descending order."):
            scheduler.set_timesteps(timesteps=timesteps)

    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]
        num_inference_steps = len(timesteps)

        with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `custom_timesteps`."):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)

    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [scheduler.config.num_train_timesteps]

        with self.assertRaises(
            ValueError,
            msg=f"`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}",
        ):
            scheduler.set_timesteps(timesteps=timesteps)
| 19 | 1 |
"""simple docstring"""
# Imports
import numpy as np
class IndexCalculation:
    """
    Calculate vegetation indices from red, green, blue, red-edge and NIR bands,
    each given as a numpy array of the same shape.
    """

    def __init__(self, red=None, green=None, blue=None, red_edge=None, nir=None):
        self.set_matrices(red=red, green=green, blue=blue, red_edge=red_edge, nir=nir)

    def set_matrices(self, red=None, green=None, blue=None, red_edge=None, nir=None):
        if red is not None:
            self.red = red
        if green is not None:
            self.green = green
        if blue is not None:
            self.blue = blue
        if red_edge is not None:
            self.redEdge = red_edge
        if nir is not None:
            self.nir = nir
        return True

    def calculation(self, index="", red=None, green=None, blue=None, red_edge=None, nir=None):
        """Run the index function selected by name, optionally updating the bands first."""
        self.set_matrices(red=red, green=green, blue=blue, red_edge=red_edge, nir=nir)
        funcs = {
            "ARVI2": self.arvi2,
            "CCCI": self.ccci,
            "CVI": self.cvi,
            "GLI": self.gli,
            "NDVI": self.ndvi,
            "BNDVI": self.bndvi,
            "redEdgeNDVI": self.red_edge_ndvi,
            "GNDVI": self.gndvi,
            "GBNDVI": self.gbndvi,
            "GRNDVI": self.grndvi,
            "RBNDVI": self.rbndvi,
            "PNDVI": self.pndvi,
            "ATSAVI": self.atsavi,
            "BWDRVI": self.bwdrvi,
            "CIgreen": self.ci_green,
            "CIrededge": self.ci_rededge,
            "CI": self.ci,
            "CTVI": self.ctvi,
            "GDVI": self.gdvi,
            "EVI": self.evi,
            "GEMI": self.gemi,
            "GOSAVI": self.gosavi,
            "GSAVI": self.gsavi,
            "Hue": self.hue,
            "IVI": self.ivi,
            "IPVI": self.ipvi,
            "I": self.i,
            "RVI": self.rvi,
            "MRVI": self.mrvi,
            "MSAVI": self.m_savi,
            "NormG": self.norm_g,
            "NormNIR": self.norm_nir,
            "NormR": self.norm_r,
            "NGRDI": self.ngrdi,
            "RI": self.ri,
            "S": self.s,
            "IF": self._if,
            "DVI": self.dvi,
            "TVI": self.tvi,
            "NDRE": self.ndre,
        }

        try:
            return funcs[index]()
        except KeyError:
            print("Index not in the list!")
            return False

    def arvi2(self):
        """Atmospherically Resistant Vegetation Index 2."""
        return -0.18 + (1.17 * ((self.nir - self.red) / (self.nir + self.red)))

    def ccci(self):
        """Canopy Chlorophyll Content Index."""
        return ((self.nir - self.redEdge) / (self.nir + self.redEdge)) / (
            (self.nir - self.red) / (self.nir + self.red)
        )

    def cvi(self):
        """Chlorophyll Vegetation Index."""
        return self.nir * (self.red / (self.green**2))

    def gli(self):
        """Green Leaf Index."""
        return (2 * self.green - self.red - self.blue) / (
            2 * self.green + self.red + self.blue
        )

    def ndvi(self):
        """Normalized Difference Vegetation Index."""
        return (self.nir - self.red) / (self.nir + self.red)

    def bndvi(self):
        """Blue-normalized difference vegetation index."""
        return (self.nir - self.blue) / (self.nir + self.blue)

    def red_edge_ndvi(self):
        return (self.redEdge - self.red) / (self.redEdge + self.red)

    def gndvi(self):
        """Green NDVI."""
        return (self.nir - self.green) / (self.nir + self.green)

    def gbndvi(self):
        """Green-Blue NDVI."""
        return (self.nir - (self.green + self.blue)) / (
            self.nir + (self.green + self.blue)
        )

    def grndvi(self):
        """Green-Red NDVI."""
        return (self.nir - (self.green + self.red)) / (
            self.nir + (self.green + self.red)
        )

    def rbndvi(self):
        """Red-Blue NDVI."""
        return (self.nir - (self.blue + self.red)) / (self.nir + (self.blue + self.red))

    def pndvi(self):
        """Pan NDVI."""
        return (self.nir - (self.green + self.red + self.blue)) / (
            self.nir + (self.green + self.red + self.blue)
        )

    def atsavi(self, x=0.08, a=1.22, b=0.03):
        """Adjusted transformed soil-adjusted vegetation index."""
        return a * (
            (self.nir - a * self.red - b)
            / (a * self.nir + self.red - a * b + x * (1 + a**2))
        )

    def bwdrvi(self):
        """Blue-wide dynamic range vegetation index."""
        return (0.1 * self.nir - self.blue) / (0.1 * self.nir + self.blue)

    def ci_green(self):
        """Chlorophyll Index Green."""
        return (self.nir / self.green) - 1

    def ci_rededge(self):
        """Chlorophyll Index RedEdge."""
        return (self.nir / self.redEdge) - 1

    def ci(self):
        """Coloration Index."""
        return (self.red - self.blue) / self.red

    def ctvi(self):
        """Corrected Transformed Vegetation Index."""
        ndvi = self.ndvi()
        return ((ndvi + 0.5) / (abs(ndvi + 0.5))) * (abs(ndvi + 0.5) ** (1 / 2))

    def gdvi(self):
        """Green Difference Vegetation Index."""
        return self.nir - self.green

    def evi(self):
        """Enhanced Vegetation Index."""
        return 2.5 * (
            (self.nir - self.red) / (self.nir + 6 * self.red - 7.5 * self.blue + 1)
        )

    def gemi(self):
        """Global Environment Monitoring Index."""
        n = (2 * (self.nir**2 - self.red**2) + 1.5 * self.nir + 0.5 * self.red) / (
            self.nir + self.red + 0.5
        )
        return n * (1 - 0.25 * n) - (self.red - 0.125) / (1 - self.red)

    def gosavi(self, y=0.16):
        """Green Optimized Soil Adjusted Vegetation Index."""
        return (self.nir - self.green) / (self.nir + self.green + y)

    def gsavi(self, n=0.5):
        """Green Soil Adjusted Vegetation Index."""
        return ((self.nir - self.green) / (self.nir + self.green + n)) * (1 + n)

    def hue(self):
        return np.arctan(
            ((2 * self.red - self.green - self.blue) / 30.5) * (self.green - self.blue)
        )

    def ivi(self, a=None, b=None):
        """Ideal vegetation index; a and b are the soil line slope and intercept."""
        return (self.nir - b) / (a * self.red)

    def ipvi(self):
        """Infrared percentage vegetation index."""
        return (self.nir / ((self.nir + self.red) / 2)) * (self.ndvi() + 1)

    def i(self):
        """Intensity."""
        return (self.red + self.green + self.blue) / 30.5

    def rvi(self):
        """Ratio Vegetation Index."""
        return self.nir / self.red

    def mrvi(self):
        """Modified ratio vegetation index."""
        return (self.rvi() - 1) / (self.rvi() + 1)

    def m_savi(self):
        """Modified Soil Adjusted Vegetation Index."""
        return (
            (2 * self.nir + 1)
            - ((2 * self.nir + 1) ** 2 - 8 * (self.nir - self.red)) ** (1 / 2)
        ) / 2

    def norm_g(self):
        return self.green / (self.nir + self.red + self.green)

    def norm_nir(self):
        return self.nir / (self.nir + self.red + self.green)

    def norm_r(self):
        return self.red / (self.nir + self.red + self.green)

    def ngrdi(self):
        """Normalized green-red difference index."""
        return (self.green - self.red) / (self.green + self.red)

    def ri(self):
        """Redness Index."""
        return (self.red - self.green) / (self.red + self.green)

    def s(self):
        """Saturation."""
        max_value = np.max([np.max(self.red), np.max(self.green), np.max(self.blue)])
        min_value = np.min([np.min(self.red), np.min(self.green), np.min(self.blue)])
        return (max_value - min_value) / max_value

    def _if(self):
        return (2 * self.red - self.green - self.blue) / (self.green - self.blue)

    def dvi(self):
        """Simple NIR/Red ratio vegetation index."""
        return self.nir / self.red

    def tvi(self):
        """Transformed Vegetation Index."""
        return (self.ndvi() + 0.5) ** (1 / 2)

    def ndre(self):
        """Normalized Difference Red-Edge."""
        return (self.nir - self.redEdge) / (self.nir + self.redEdge)
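# Usage sketch (toy 2x2 band arrays; values are illustrative only):
#
#   bands = IndexCalculation(
#       red=np.array([[0.2, 0.3], [0.4, 0.5]]),
#       green=np.array([[0.3, 0.3], [0.4, 0.4]]),
#       blue=np.array([[0.1, 0.1], [0.2, 0.2]]),
#       red_edge=np.array([[0.4, 0.5], [0.5, 0.6]]),
#       nir=np.array([[0.6, 0.7], [0.8, 0.9]]),
#   )
#   ndvi_map = bands.calculation("NDVI")  # element-wise (NIR - Red) / (NIR + Red)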
| 19 |
"""simple docstring"""
from __future__ import annotations
from functools import lru_cache
from math import ceil
NUM_PRIMES = 100

primes = set(range(3, NUM_PRIMES, 2))
primes.add(2)
for prime in range(3, ceil(NUM_PRIMES**0.5), 2):
    if prime not in primes:
        continue
    primes.difference_update(set(range(prime * prime, NUM_PRIMES, prime)))


@lru_cache(maxsize=100)
def partition(number_to_partition: int) -> set[int]:
    """Return the products of all multisets of primes summing to number_to_partition."""
    if number_to_partition < 0:
        return set()
    elif number_to_partition == 0:
        return {1}

    ret: set[int] = set()
    for prime in primes:
        if prime > number_to_partition:
            continue
        for sub in partition(number_to_partition - prime):
            ret.add(sub * prime)

    return ret
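# Sanity check: 5 has two prime partitions (5 and 2 + 3), and unique
# factorization guarantees distinct multisets give distinct products,
# so partition(5) == {5, 6} and len(partition(n)) counts prime partitions.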
def solution(number_unique_partitions: int = 5000) -> int | None:
    """Return the smallest integer that can be written as the sum of primes
    in more than number_unique_partitions ways."""
    for number_to_partition in range(1, NUM_PRIMES):
        if len(partition(number_to_partition)) > number_unique_partitions:
            return number_to_partition
    return None


if __name__ == "__main__":
    print(f"{solution() = }")
| 19 | 1 |