def infix_2_postfix(infix: str) -> str:
    """Convert an infix expression to postfix, printing each step in a table."""
    stack = []
    post_fix = []
    priority = {
        "^": 3,
        "*": 2,
        "/": 2,
        "%": 2,
        "+": 1,
        "-": 1,
    }  # Priority of each operator
    print_width = len(infix) if (len(infix) > 7) else 7

    # Print table header for output
    print(
        "Symbol".center(8),
        "Stack".center(print_width),
        "Postfix".center(print_width),
        sep=" | ",
    )
    print("-" * (print_width * 3 + 7))

    for x in infix:
        if x.isalpha() or x.isdigit():
            post_fix.append(x)  # if x is Alphabet / Digit, add it to Postfix
        elif x == "(":
            stack.append(x)  # if x is "(" push to Stack
        elif x == ")":  # if x is ")" pop stack until "(" is encountered
            while stack[-1] != "(":
                post_fix.append(stack.pop())  # Pop stack & add the content to Postfix
            stack.pop()
        else:
            if len(stack) == 0:
                stack.append(x)  # If stack is empty, push x to stack
            else:  # while priority of x is not > priority of element in the stack
                while len(stack) > 0 and priority[x] <= priority[stack[-1]]:
                    post_fix.append(stack.pop())  # pop stack & add to Postfix
                stack.append(x)  # push x to stack

        print(
            x.center(8),
            ("".join(stack)).ljust(print_width),
            ("".join(post_fix)).ljust(print_width),
            sep=" | ",
        )  # Output in tabular format

    while len(stack) > 0:  # while stack is not empty
        post_fix.append(stack.pop())  # pop stack & add to Postfix
        print(
            " ".center(8),
            ("".join(stack)).ljust(print_width),
            ("".join(post_fix)).ljust(print_width),
            sep=" | ",
        )  # Output in tabular format

    return "".join(post_fix)  # return Postfix as str


def infix_2_prefix(infix: str) -> str:
    """Convert an infix expression to prefix notation."""
    infix = list(infix[::-1])  # reverse the infix equation
    for i in range(len(infix)):
        if infix[i] == "(":
            infix[i] = ")"  # change "(" to ")"
        elif infix[i] == ")":
            infix[i] = "("  # change ")" to "("
    # call infix_2_postfix on the reversed infix, then reverse the postfix result
    return (infix_2_postfix("".join(infix)))[::-1]


if __name__ == "__main__":
    Infix = input("\nEnter an Infix Equation = ")  # Input an Infix equation
    Infix = "".join(Infix.split())  # Remove spaces from the input
    print("\n\t", Infix, "(Infix) -> ", infix_2_prefix(Infix), "(Prefix)")
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
def __init__( self , *lowerCamelCase_ , **lowerCamelCase_) -> None:
warnings.warn(
'''The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
''' Please use MobileViTImageProcessor instead.''' , lowerCamelCase_ , )
super().__init__(*lowerCamelCase_ , **lowerCamelCase_) | 34 | 0 |
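# Migration sketch (illustrative; "apple/mobilevit-small" stands in for any
# MobileViT checkpoint): prefer the image processor over the deprecated
# feature extractor.
#
#   from transformers import MobileViTImageProcessor
#
#   processor = MobileViTImageProcessor.from_pretrained("apple/mobilevit-small")
#   inputs = processor(images=image, return_tensors="pt")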
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/spiece.model",
        "albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/spiece.model",
        "albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model",
        "albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model",
        "albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/spiece.model",
        "albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/spiece.model",
        "albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model",
        "albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "albert-base-v1": 512,
    "albert-large-v1": 512,
    "albert-xlarge-v1": 512,
    "albert-xxlarge-v1": 512,
    "albert-base-v2": 512,
    "albert-large-v2": 512,
    "albert-xlarge-v2": 512,
    "albert-xxlarge-v2": 512,
}

SPIECE_UNDERLINE = "▁"


class AlbertTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        vocab_file,
        do_lower_case=True,
        remove_space=True,
        keep_accents=False,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="<unk>",
        sep_token="[SEP]",
        pad_token="<pad>",
        cls_token="[CLS]",
        mask_token="[MASK]",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Mask token behaves like a normal word, i.e. include the space before it.
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self) -> int:
        return len(self.sp_model)

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def preprocess_text(self, inputs):
        if self.remove_space:
            outputs = " ".join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace("``", '"').replace("''", '"')

        if not self.keep_accents:
            outputs = unicodedata.normalize("NFKD", outputs)
            outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()

        return outputs

    def _tokenize(self, text: str) -> List[str]:
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)

        return new_pieces

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self,
        token_ids_0: List[int],
        token_ids_1: Optional[List[int]] = None,
        already_has_special_tokens: bool = False,
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
"""simple docstring"""
def __snake_case ( _lowercase ):
"""simple docstring"""
UpperCamelCase = [0 for i in range(len(_lowercase ) )]
# initialize interval's left pointer and right pointer
UpperCamelCase , UpperCamelCase = 0, 0
for i in range(1 ,len(_lowercase ) ):
# case when current index is inside the interval
if i <= right_pointer:
UpperCamelCase = min(right_pointer - i + 1 ,z_result[i - left_pointer] )
UpperCamelCase = min_edge
while go_next(_lowercase ,_lowercase ,_lowercase ):
z_result[i] += 1
# if new index's result gives us more right interval,
# we've to update left_pointer and right_pointer
if i + z_result[i] - 1 > right_pointer:
UpperCamelCase , UpperCamelCase = i, i + z_result[i] - 1
return z_result
def __snake_case ( _lowercase ,_lowercase ,_lowercase ):
"""simple docstring"""
return i + z_result[i] < len(_lowercase ) and s[z_result[i]] == s[i + z_result[i]]
def __snake_case ( _lowercase ,_lowercase ):
"""simple docstring"""
UpperCamelCase = 0
# concatenate 'pattern' and 'input_str' and call z_function
# with concatenated string
UpperCamelCase = z_function(pattern + input_str )
for val in z_result:
# if value is greater then length of the pattern string
# that means this index is starting position of substring
# which is equal to pattern string
if val >= len(_lowercase ):
answer += 1
return answer
if __name__ == "__main__":
import doctest
doctest.testmod() | 34 | 0 |
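# Worked example (values checked by hand): for "abracadabra" the Z-array is
# [0, 0, 0, 1, 0, 1, 0, 4, 0, 0, 1] - e.g. index 7 ("abra") matches a
# 4-character prefix of the string, and "abr" occurs twice overall.
assert z_function("abracadabra") == [0, 0, 0, 1, 0, 1, 0, 4, 0, 0, 1]
assert find_pattern("abr", "abracadabra") == 2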
from __future__ import annotations

from collections.abc import Generator

import requests
from bs4 import BeautifulSoup

url = "https://www.indeed.co.in/jobs?q=mobile+app+development&l="


def fetch_jobs(location: str = "mumbai") -> Generator[tuple[str, str], None, None]:
    """Yield (job title, company name) pairs scraped from Indeed."""
    soup = BeautifulSoup(requests.get(url + location).content, "html.parser")
    # This attribute finds out all the specifics listed in a job
    for job in soup.find_all("div", attrs={"data-tn-component": "organicJob"}):
        job_title = job.find("a", attrs={"data-tn-element": "jobTitle"}).text.strip()
        company_name = job.find("span", {"class": "company"}).text.strip()
        yield job_title, company_name


if __name__ == "__main__":
    for i, job in enumerate(fetch_jobs("Bangalore"), 1):
        print(f"Job {i:>2} is {job[0]} at {job[1]}")
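# Offline sanity check (illustrative markup mirroring the selectors used above;
# no network access is needed):
_html = (
    '<div data-tn-component="organicJob">'
    '<a data-tn-element="jobTitle"> Android Developer </a>'
    '<span class="company"> ExampleSoft </span>'
    "</div>"
)
_soup = BeautifulSoup(_html, "html.parser")
_job = _soup.find("div", attrs={"data-tn-component": "organicJob"})
assert _job.find("a", attrs={"data-tn-element": "jobTitle"}).text.strip() == "Android Developer"
assert _job.find("span", {"class": "company"}).text.strip() == "ExampleSoft"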
"""simple docstring"""
import importlib.metadata
import warnings
from copy import deepcopy
from packaging import version
from ..utils import logging
from .import_utils import is_accelerate_available, is_bitsandbytes_available
if is_bitsandbytes_available():
import bitsandbytes as bnb
import torch
import torch.nn as nn
from ..pytorch_utils import ConvaD
if is_accelerate_available():
from accelerate import init_empty_weights
from accelerate.utils import find_tied_parameters
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
def __snake_case ( _lowercase ,_lowercase ,_lowercase ,_lowercase=None ,_lowercase=None ):
"""simple docstring"""
if "." in tensor_name:
UpperCamelCase = tensor_name.split('''.''' )
for split in splits[:-1]:
UpperCamelCase = getattr(_lowercase ,_lowercase )
if new_module is None:
raise ValueError(f'{module} has no attribute {split}.' )
UpperCamelCase = new_module
UpperCamelCase = splits[-1]
if tensor_name not in module._parameters and tensor_name not in module._buffers:
raise ValueError(f'{module} does not have a parameter or a buffer named {tensor_name}.' )
UpperCamelCase = tensor_name in module._buffers
UpperCamelCase = getattr(_lowercase ,_lowercase )
if old_value.device == torch.device('''meta''' ) and device not in ["meta", torch.device('''meta''' )] and value is None:
raise ValueError(f'{tensor_name} is on the meta device, we need a `value` to put in on {device}.' )
UpperCamelCase = False
UpperCamelCase = False
if is_buffer or not is_bitsandbytes_available():
UpperCamelCase = False
UpperCamelCase = False
else:
UpperCamelCase = hasattr(bnb.nn ,'''Params4bit''' ) and isinstance(module._parameters[tensor_name] ,bnb.nn.Paramsabit )
UpperCamelCase = isinstance(module._parameters[tensor_name] ,bnb.nn.IntaParams )
if is_abit or is_abit:
UpperCamelCase = module._parameters[tensor_name]
if param.device.type != "cuda":
if value is None:
UpperCamelCase = old_value.to(_lowercase )
elif isinstance(_lowercase ,torch.Tensor ):
UpperCamelCase = value.to('''cpu''' )
if value.dtype == torch.inta:
UpperCamelCase = version.parse(importlib.metadata.version('''bitsandbytes''' ) ) > version.parse(
'''0.37.2''' )
if not is_abit_serializable:
raise ValueError(
'''Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. '''
'''Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`.''' )
else:
UpperCamelCase = torch.tensor(_lowercase ,device='''cpu''' )
# Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization.
# Since weights are saved in the correct "orientation", we skip transposing when loading.
if issubclass(module.source_cls ,_lowercase ) and fpaa_statistics is None:
UpperCamelCase = new_value.T
UpperCamelCase = old_value.__dict__
if is_abit:
UpperCamelCase = bnb.nn.IntaParams(_lowercase ,requires_grad=_lowercase ,**_lowercase ).to(_lowercase )
elif is_abit:
UpperCamelCase = bnb.nn.Paramsabit(_lowercase ,requires_grad=_lowercase ,**_lowercase ).to(_lowercase )
UpperCamelCase = new_value
if fpaa_statistics is not None:
setattr(module.weight ,'''SCB''' ,fpaa_statistics.to(_lowercase ) )
else:
if value is None:
UpperCamelCase = old_value.to(_lowercase )
elif isinstance(_lowercase ,torch.Tensor ):
UpperCamelCase = value.to(_lowercase )
else:
UpperCamelCase = torch.tensor(_lowercase ,device=_lowercase )
if is_buffer:
UpperCamelCase = new_value
else:
UpperCamelCase = nn.Parameter(_lowercase ,requires_grad=old_value.requires_grad )
UpperCamelCase = new_value
def __snake_case ( _lowercase ,_lowercase=None ,_lowercase=None ,_lowercase=None ,_lowercase=False ):
"""simple docstring"""
for name, module in model.named_children():
if current_key_name is None:
UpperCamelCase = []
current_key_name.append(_lowercase )
if (isinstance(_lowercase ,nn.Linear ) or isinstance(_lowercase ,_lowercase )) and name not in modules_to_not_convert:
# Check if the current key is not in the `modules_to_not_convert`
if not any(key in '''.'''.join(_lowercase ) for key in modules_to_not_convert ):
with init_empty_weights():
if isinstance(_lowercase ,_lowercase ):
UpperCamelCase , UpperCamelCase = module.weight.shape
else:
UpperCamelCase = module.in_features
UpperCamelCase = module.out_features
if quantization_config.quantization_method() == "llm_int8":
UpperCamelCase = bnb.nn.LinearabitLt(
_lowercase ,_lowercase ,module.bias is not None ,has_fpaa_weights=quantization_config.llm_inta_has_fpaa_weight ,threshold=quantization_config.llm_inta_threshold ,)
UpperCamelCase = True
else:
if (
quantization_config.llm_inta_skip_modules is not None
and name in quantization_config.llm_inta_skip_modules
):
pass
else:
UpperCamelCase = bnb.nn.Linearabit(
_lowercase ,_lowercase ,module.bias is not None ,quantization_config.bnb_abit_compute_dtype ,compress_statistics=quantization_config.bnb_abit_use_double_quant ,quant_type=quantization_config.bnb_abit_quant_type ,)
UpperCamelCase = True
# Store the module class in case we need to transpose the weight later
UpperCamelCase = type(_lowercase )
# Force requires grad to False to avoid unexpected errors
model._modules[name].requires_grad_(_lowercase )
if len(list(module.children() ) ) > 0:
UpperCamelCase , UpperCamelCase = _replace_with_bnb_linear(
_lowercase ,_lowercase ,_lowercase ,_lowercase ,has_been_replaced=_lowercase ,)
# Remove the last key for recursion
current_key_name.pop(-1 )
return model, has_been_replaced
def __snake_case ( _lowercase ,_lowercase=None ,_lowercase=None ,_lowercase=None ):
"""simple docstring"""
UpperCamelCase = ['''lm_head'''] if modules_to_not_convert is None else modules_to_not_convert
UpperCamelCase , UpperCamelCase = _replace_with_bnb_linear(
_lowercase ,_lowercase ,_lowercase ,_lowercase )
if not has_been_replaced:
logger.warning(
'''You are loading your model in 8bit or 4bit but no linear modules were found in your model.'''
''' Please double check your model architecture, or submit an issue on github if you think this is'''
''' a bug.''' )
return model
def __snake_case ( *_lowercase ,**_lowercase ):
"""simple docstring"""
warnings.warn(
'''`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead''' ,_lowercase ,)
return replace_with_bnb_linear(*_lowercase ,**_lowercase )
def __snake_case ( *_lowercase ,**_lowercase ):
"""simple docstring"""
warnings.warn(
'''`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use `set_module_quantized_tensor_to_device` instead''' ,_lowercase ,)
return set_module_quantized_tensor_to_device(*_lowercase ,**_lowercase )
def __snake_case ( _lowercase ):
"""simple docstring"""
UpperCamelCase = deepcopy(_lowercase ) # this has 0 cost since it is done inside `init_empty_weights` context manager`
tied_model.tie_weights()
UpperCamelCase = find_tied_parameters(_lowercase )
# For compatibility with Accelerate < 0.18
if isinstance(_lowercase ,_lowercase ):
UpperCamelCase = sum(list(tied_params.values() ) ,[] ) + list(tied_params.keys() )
else:
UpperCamelCase = sum(_lowercase ,[] )
UpperCamelCase = len(_lowercase ) > 0
# Check if it is a base model
UpperCamelCase = not hasattr(_lowercase ,model.base_model_prefix )
# Ignore this for base models (BertModel, GPT2Model, etc.)
if (not has_tied_params) and is_base_model:
return []
# otherwise they have an attached head
UpperCamelCase = list(model.named_children() )
UpperCamelCase = [list_modules[-1][0]]
# add last module together with tied weights
UpperCamelCase = set(_lowercase ) - set(_lowercase )
UpperCamelCase = list(set(_lowercase ) ) + list(_lowercase )
# remove ".weight" from the keys
UpperCamelCase = ['''.weight''', '''.bias''']
UpperCamelCase = []
for name in list_untouched:
for name_to_remove in names_to_remove:
if name_to_remove in name:
UpperCamelCase = name.replace(_lowercase ,'''''' )
filtered_module_names.append(_lowercase )
return filtered_module_names | 34 | 0 |
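# Minimal usage sketch (illustrative; exercises only the non-quantized code
# path above, where the call reduces to a plain tensor move/assignment):
#
#   import torch
#   import torch.nn as nn
#
#   layer = nn.Linear(4, 2)
#   set_module_quantized_tensor_to_device(layer, "weight", "cpu", value=torch.zeros(2, 4))
#   assert torch.equal(layer.weight, torch.zeros(2, 4))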
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available


_import_structure = {
    "configuration_data2vec_audio": ["DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP", "Data2VecAudioConfig"],
    "configuration_data2vec_text": [
        "DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Data2VecTextConfig",
        "Data2VecTextOnnxConfig",
    ],
    "configuration_data2vec_vision": [
        "DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Data2VecVisionConfig",
        "Data2VecVisionOnnxConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_data2vec_audio"] = [
        "DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Data2VecAudioForAudioFrameClassification",
        "Data2VecAudioForCTC",
        "Data2VecAudioForSequenceClassification",
        "Data2VecAudioForXVector",
        "Data2VecAudioModel",
        "Data2VecAudioPreTrainedModel",
    ]
    _import_structure["modeling_data2vec_text"] = [
        "DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Data2VecTextForCausalLM",
        "Data2VecTextForMaskedLM",
        "Data2VecTextForMultipleChoice",
        "Data2VecTextForQuestionAnswering",
        "Data2VecTextForSequenceClassification",
        "Data2VecTextForTokenClassification",
        "Data2VecTextModel",
        "Data2VecTextPreTrainedModel",
    ]
    _import_structure["modeling_data2vec_vision"] = [
        "DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Data2VecVisionForImageClassification",
        "Data2VecVisionForMaskedImageModeling",
        "Data2VecVisionForSemanticSegmentation",
        "Data2VecVisionModel",
        "Data2VecVisionPreTrainedModel",
    ]

if is_tf_available():
    _import_structure["modeling_tf_data2vec_vision"] = [
        "TFData2VecVisionForImageClassification",
        "TFData2VecVisionForSemanticSegmentation",
        "TFData2VecVisionModel",
        "TFData2VecVisionPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_data2vec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, Data2VecAudioConfig
    from .configuration_data2vec_text import (
        DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecTextConfig,
        Data2VecTextOnnxConfig,
    )
    from .configuration_data2vec_vision import (
        DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecVisionConfig,
        Data2VecVisionOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_data2vec_audio import (
            DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecAudioForAudioFrameClassification,
            Data2VecAudioForCTC,
            Data2VecAudioForSequenceClassification,
            Data2VecAudioForXVector,
            Data2VecAudioModel,
            Data2VecAudioPreTrainedModel,
        )
        from .modeling_data2vec_text import (
            DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecTextForCausalLM,
            Data2VecTextForMaskedLM,
            Data2VecTextForMultipleChoice,
            Data2VecTextForQuestionAnswering,
            Data2VecTextForSequenceClassification,
            Data2VecTextForTokenClassification,
            Data2VecTextModel,
            Data2VecTextPreTrainedModel,
        )
        from .modeling_data2vec_vision import (
            DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecVisionForImageClassification,
            Data2VecVisionForMaskedImageModeling,
            Data2VecVisionForSemanticSegmentation,
            Data2VecVisionModel,
            Data2VecVisionPreTrainedModel,
        )

    if is_tf_available():
        from .modeling_tf_data2vec_vision import (
            TFData2VecVisionForImageClassification,
            TFData2VecVisionForSemanticSegmentation,
            TFData2VecVisionModel,
            TFData2VecVisionPreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
from random import randint
from tempfile import TemporaryFile
import numpy as np
def __snake_case ( _lowercase ,_lowercase ,_lowercase ):
"""simple docstring"""
UpperCamelCase = 0
if start < end:
UpperCamelCase = randint(_lowercase ,_lowercase )
UpperCamelCase = a[end]
UpperCamelCase = a[pivot]
UpperCamelCase = temp
UpperCamelCase , UpperCamelCase = _in_place_partition(_lowercase ,_lowercase ,_lowercase )
count += _in_place_quick_sort(_lowercase ,_lowercase ,p - 1 )
count += _in_place_quick_sort(_lowercase ,p + 1 ,_lowercase )
return count
def __snake_case ( _lowercase ,_lowercase ,_lowercase ):
"""simple docstring"""
UpperCamelCase = 0
UpperCamelCase = randint(_lowercase ,_lowercase )
UpperCamelCase = a[end]
UpperCamelCase = a[pivot]
UpperCamelCase = temp
UpperCamelCase = start - 1
for index in range(_lowercase ,_lowercase ):
count += 1
if a[index] < a[end]: # check if current val is less than pivot value
UpperCamelCase = new_pivot_index + 1
UpperCamelCase = a[new_pivot_index]
UpperCamelCase = a[index]
UpperCamelCase = temp
UpperCamelCase = a[new_pivot_index + 1]
UpperCamelCase = a[end]
UpperCamelCase = temp
return new_pivot_index + 1, count
SCREAMING_SNAKE_CASE_ = TemporaryFile()
SCREAMING_SNAKE_CASE_ = 100 # 1000 elements are to be sorted
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = 0, 1 # mean and standard deviation
SCREAMING_SNAKE_CASE_ = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print('The array is')
print(X)
outfile.seek(0) # using the same array
SCREAMING_SNAKE_CASE_ = np.load(outfile)
SCREAMING_SNAKE_CASE_ = len(M) - 1
SCREAMING_SNAKE_CASE_ = _in_place_quick_sort(M, 0, r)
print(
'No of Comparisons for 100 elements selected from a standard normal distribution'
'is :'
)
print(z) | 34 | 0 |
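# Quick self-check (illustrative): the in-place sort must agree with sorted().
_sample = np.random.normal(0, 1, 20)
_expected = sorted(_sample.tolist())
_in_place_quick_sort(_sample, 0, len(_sample) - 1)
assert _sample.tolist() == _expected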
import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch

import pyarrow as pa
import pytest
import requests
from packaging import version

from datasets import config


if config.PY_VERSION < version.parse("3.8"):
    import importlib_metadata
else:
    import importlib.metadata as importlib_metadata


def parse_flag_from_env(key, default=False):
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f"If set, {key} must be yes or no.")
    return _value


_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
_run_remote_tests = parse_flag_from_env("RUN_REMOTE", default=False)
_run_local_tests = parse_flag_from_env("RUN_LOCAL", default=True)
_run_packaged_tests = parse_flag_from_env("RUN_PACKAGED", default=True)

# Compression
require_lz4 = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason="test requires lz4")
require_py7zr = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason="test requires py7zr")
require_zstandard = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason="test requires zstandard")

# Audio
require_sndfile = pytest.mark.skipif(
    # On Windows and OS X, soundfile installs sndfile
    find_spec("soundfile") is None or version.parse(importlib_metadata.version("soundfile")) < version.parse("0.12.0"),
    reason="test requires sndfile>=0.12.1: 'pip install \"soundfile>=0.12.1\"'; ",
)

# Beam
require_beam = pytest.mark.skipif(
    not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse("0.3.2"),
    reason="test requires apache-beam and a compatible dill version",
)

# Dill-cloudpickle compatibility
require_dill_gt_0_3_2 = pytest.mark.skipif(
    config.DILL_VERSION <= version.parse("0.3.2"),
    reason="test requires dill>0.3.2 for cloudpickle compatibility",
)

# Windows
require_not_windows = pytest.mark.skipif(
    sys.platform == "win32",
    reason="test should not be run on Windows",
)


def require_faiss(test_case):
    try:
        import faiss  # noqa
    except ImportError:
        test_case = unittest.skip("test requires faiss")(test_case)
    return test_case


def require_regex(test_case):
    try:
        import regex  # noqa
    except ImportError:
        test_case = unittest.skip("test requires regex")(test_case)
    return test_case


def require_elasticsearch(test_case):
    try:
        import elasticsearch  # noqa
    except ImportError:
        test_case = unittest.skip("test requires elasticsearch")(test_case)
    return test_case


def require_sqlalchemy(test_case):
    try:
        import sqlalchemy  # noqa
    except ImportError:
        test_case = unittest.skip("test requires sqlalchemy")(test_case)
    return test_case


def require_torch(test_case):
    if not config.TORCH_AVAILABLE:
        test_case = unittest.skip("test requires PyTorch")(test_case)
    return test_case


def require_tf(test_case):
    if not config.TF_AVAILABLE:
        test_case = unittest.skip("test requires TensorFlow")(test_case)
    return test_case


def require_jax(test_case):
    if not config.JAX_AVAILABLE:
        test_case = unittest.skip("test requires JAX")(test_case)
    return test_case


def require_pil(test_case):
    if not config.PIL_AVAILABLE:
        test_case = unittest.skip("test requires Pillow")(test_case)
    return test_case


def require_transformers(test_case):
    try:
        import transformers  # noqa F401
    except ImportError:
        return unittest.skip("test requires transformers")(test_case)
    else:
        return test_case


def require_tiktoken(test_case):
    try:
        import tiktoken  # noqa F401
    except ImportError:
        return unittest.skip("test requires tiktoken")(test_case)
    else:
        return test_case


def require_spacy(test_case):
    try:
        import spacy  # noqa F401
    except ImportError:
        return unittest.skip("test requires spacy")(test_case)
    else:
        return test_case


def require_spacy_model(model):
    def _require_spacy_model(test_case):
        try:
            import spacy  # noqa F401

            spacy.load(model)
        except ImportError:
            return unittest.skip("test requires spacy")(test_case)
        except OSError:
            return unittest.skip("test requires spacy model '{}'".format(model))(test_case)
        else:
            return test_case

    return _require_spacy_model


def require_pyspark(test_case):
    try:
        import pyspark  # noqa F401
    except ImportError:
        return unittest.skip("test requires pyspark")(test_case)
    else:
        return test_case


def require_joblibspark(test_case):
    try:
        import joblibspark  # noqa F401
    except ImportError:
        return unittest.skip("test requires joblibspark")(test_case)
    else:
        return test_case


def slow(test_case):
    if not _run_slow_tests or _run_slow_tests == 0:
        test_case = unittest.skip("test is slow")(test_case)
    return test_case


def local(test_case):
    if not _run_local_tests or _run_local_tests == 0:
        test_case = unittest.skip("test is local")(test_case)
    return test_case


def packaged(test_case):
    if not _run_packaged_tests or _run_packaged_tests == 0:
        test_case = unittest.skip("test is packaged")(test_case)
    return test_case


def remote(test_case):
    if not _run_remote_tests or _run_remote_tests == 0:
        test_case = unittest.skip("test requires remote")(test_case)
    return test_case


def for_all_test_methods(*decorators):
    def decorate(cls):
        for name, fn in cls.__dict__.items():
            if callable(fn) and name.startswith("test"):
                for decorator in decorators:
                    fn = decorator(fn)
                setattr(cls, name, fn)
        return cls

    return decorate


class RequestWouldHangIndefinitelyError(Exception):
    pass


class OfflineSimulationMode(Enum):
    CONNECTION_FAILS = 0
    CONNECTION_TIMES_OUT = 1
    HF_DATASETS_OFFLINE_SET_TO_1 = 2


@contextmanager
def offline(mode=OfflineSimulationMode.CONNECTION_FAILS, timeout=1e-16):
    online_request = requests.Session().request

    def timeout_request(session, method, url, **kwargs):
        # Change the url to an invalid url so that the connection hangs
        invalid_url = "https://10.255.255.1"
        if kwargs.get("timeout") is None:
            raise RequestWouldHangIndefinitelyError(
                f"Tried a call to {url} in offline mode with no timeout set. Please set a timeout."
            )
        kwargs["timeout"] = timeout
        try:
            return online_request(method, invalid_url, **kwargs)
        except Exception as e:
            # The following changes in the error are just here to make the offline timeout error prettier
            e.request.url = url
            max_retry_error = e.args[0]
            max_retry_error.args = (max_retry_error.args[0].replace("10.255.255.1", f"OfflineMock[{url}]"),)
            e.args = (max_retry_error,)
            raise

    def raise_connection_error(session, prepared_request, **kwargs):
        raise requests.ConnectionError("Offline mode is enabled.", request=prepared_request)

    if mode is OfflineSimulationMode.CONNECTION_FAILS:
        with patch("requests.Session.send", raise_connection_error):
            yield
    elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
        # inspired from https://stackoverflow.com/a/904609
        with patch("requests.Session.request", timeout_request):
            yield
    elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
        with patch("datasets.config.HF_DATASETS_OFFLINE", True):
            yield
    else:
        raise ValueError("Please use a value from the OfflineSimulationMode enum.")


@contextmanager
def set_current_working_directory_to_temp_dir(*args, **kwargs):
    original_working_dir = str(Path().resolve())
    with tempfile.TemporaryDirectory(*args, **kwargs) as tmp_dir:
        try:
            os.chdir(tmp_dir)
            yield
        finally:
            os.chdir(original_working_dir)


@contextmanager
def assert_arrow_memory_increases():
    import gc

    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."


@contextmanager
def assert_arrow_memory_doesnt_increase():
    import gc

    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."


def is_rng_equal(rng1, rng2):
    return deepcopy(rng1).integers(0, 100, 10).tolist() == deepcopy(rng2).integers(0, 100, 10).tolist()


def xfail_if_500_502_http_error(func):
    import decorator
    from requests.exceptions import HTTPError

    def _wrapper(func, *args, **kwargs):
        try:
            return func(*args, **kwargs)
        except HTTPError as err:
            if str(err).startswith("500") or str(err).startswith("502"):
                pytest.xfail(str(err))
            raise err

    return decorator.decorator(_wrapper, func)


class _RunOutput:
    def __init__(self, returncode, stdout, stderr):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr


async def _read_stream(stream, callback):
    while True:
        line = await stream.readline()
        if line:
            callback(line)
        else:
            break


async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
    if echo:
        print("\nRunning: ", " ".join(cmd))

    p = await asyncio.create_subprocess_exec(
        cmd[0],
        *cmd[1:],
        stdin=stdin,
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE,
        env=env,
    )

    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)

    out = []
    err = []

    def tee(line, sink, pipe, label=""):
        line = line.decode("utf-8").rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            _read_stream(p.stdout, lambda line: tee(line, out, sys.stdout, label="stdout:")),
            _read_stream(p.stderr, lambda line: tee(line, err, sys.stderr, label="stderr:")),
        ],
        timeout=timeout,
    )
    return _RunOutput(await p.wait(), out, err)


def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo)
    )

    cmd_str = " ".join(cmd)
    if result.returncode > 0:
        stderr = "\n".join(result.stderr)
        raise RuntimeError(
            f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
            f"The combined stderr from workers follows:\n{stderr}"
        )

    # check that the subprocess actually did run and produced some output, should the test rely on
    # the remote side to do the testing
    if not result.stdout and not result.stderr:
        raise RuntimeError(f"'{cmd_str}' produced no output.")

    return result


def pytest_xdist_worker_id():
    """Return the numerical id of the current `pytest-xdist` worker (0 if not used)."""
    worker = os.environ.get("PYTEST_XDIST_WORKER", "gw0")
    worker = re.sub(r"^gw", "", worker, 0, re.M)
    return int(worker)


def get_torch_dist_unique_port():
    """Return a port unique to this `pytest-xdist` worker so concurrent suites don't collide."""
    port = 29500
    uniq_delta = pytest_xdist_worker_id()
    return port + uniq_delta
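# Usage sketch (illustrative): gating a unittest case on the RUN_SLOW flag.
#
#   import unittest
#
#   class MyIntegrationTest(unittest.TestCase):
#       @slow  # skipped unless RUN_SLOW=yes is set in the environment
#       def test_full_pipeline(self):
#           ...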
"""simple docstring"""
import os
import sys
import unittest
SCREAMING_SNAKE_CASE_ = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
SCREAMING_SNAKE_CASE_ = os.path.join(git_repo_path, 'src', 'transformers')
SCREAMING_SNAKE_CASE_ = '\n{0} = None\n'
SCREAMING_SNAKE_CASE_ = '\nclass {0}(metaclass=DummyObject):\n _backends = {1}\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, {1})\n'
SCREAMING_SNAKE_CASE_ = '\ndef {0}(*args, **kwargs):\n requires_backends({0}, {1})\n'
class snake_case_ ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase__ ( self) -> List[Any]:
UpperCamelCase = find_backend(''' _import_structure["models.albert"].append("AlbertTokenizerFast")''')
self.assertIsNone(lowerCamelCase_)
UpperCamelCase = find_backend(''' if not is_tokenizers_available():''')
self.assertEqual(lowerCamelCase_ , '''tokenizers''')
UpperCamelCase = find_backend(''' if not is_tensorflow_text_available():''')
self.assertEqual(lowerCamelCase_ , '''tensorflow_text''')
UpperCamelCase = find_backend(''' if not (is_sentencepiece_available() and is_tokenizers_available()):''')
self.assertEqual(lowerCamelCase_ , '''sentencepiece_and_tokenizers''')
UpperCamelCase = find_backend(
''' if not (is_sentencepiece_available() and is_tensorflow_text_available()):''')
self.assertEqual(lowerCamelCase_ , '''sentencepiece_and_tensorflow_text''')
UpperCamelCase = find_backend(
''' if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):''')
self.assertEqual(lowerCamelCase_ , '''sentencepiece_and_tokenizers_and_vision''')
def UpperCAmelCase__ ( self) -> int:
UpperCamelCase = read_init()
# We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
self.assertIn('''torch''' , lowerCamelCase_)
self.assertIn('''tensorflow_text''' , lowerCamelCase_)
self.assertIn('''sentencepiece_and_tokenizers''' , lowerCamelCase_)
# Likewise, we can't assert on the exact content of a key
self.assertIn('''BertModel''' , objects['''torch'''])
self.assertIn('''TFBertModel''' , objects['''tf'''])
self.assertIn('''FlaxBertModel''' , objects['''flax'''])
self.assertIn('''BertModel''' , objects['''torch'''])
self.assertIn('''TFBertTokenizer''' , objects['''tensorflow_text'''])
self.assertIn('''convert_slow_tokenizer''' , objects['''sentencepiece_and_tokenizers'''])
def UpperCAmelCase__ ( self) -> Optional[int]:
UpperCamelCase = create_dummy_object('''CONSTANT''' , '''\'torch\'''')
self.assertEqual(lowerCamelCase_ , '''\nCONSTANT = None\n''')
UpperCamelCase = create_dummy_object('''function''' , '''\'torch\'''')
self.assertEqual(
lowerCamelCase_ , '''\ndef function(*args, **kwargs):\n requires_backends(function, \'torch\')\n''')
UpperCamelCase = '''
class FakeClass(metaclass=DummyObject):
_backends = \'torch\'
def __init__(self, *args, **kwargs):
requires_backends(self, \'torch\')
'''
UpperCamelCase = create_dummy_object('''FakeClass''' , '''\'torch\'''')
self.assertEqual(lowerCamelCase_ , lowerCamelCase_)
def UpperCAmelCase__ ( self) -> int:
UpperCamelCase = '''# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends
CONSTANT = None
def function(*args, **kwargs):
requires_backends(function, ["torch"])
class FakeClass(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
'''
UpperCamelCase = create_dummy_files({'''torch''': ['''CONSTANT''', '''function''', '''FakeClass''']})
self.assertEqual(dummy_files['''torch'''] , lowerCamelCase_) | 34 | 0 |
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {
    "configuration_lxmert": ["LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "LxmertConfig"],
    "tokenization_lxmert": ["LxmertTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_lxmert_fast"] = ["LxmertTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_lxmert"] = [
        "LxmertEncoder",
        "LxmertForPreTraining",
        "LxmertForQuestionAnswering",
        "LxmertModel",
        "LxmertPreTrainedModel",
        "LxmertVisualFeatureEncoder",
        "LxmertXLayer",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_lxmert"] = [
        "TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFLxmertForPreTraining",
        "TFLxmertMainLayer",
        "TFLxmertModel",
        "TFLxmertPreTrainedModel",
        "TFLxmertVisualFeatureEncoder",
    ]

if TYPE_CHECKING:
    from .configuration_lxmert import LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, LxmertConfig
    from .tokenization_lxmert import LxmertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_lxmert_fast import LxmertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_lxmert import (
            LxmertEncoder,
            LxmertForPreTraining,
            LxmertForQuestionAnswering,
            LxmertModel,
            LxmertPreTrainedModel,
            LxmertVisualFeatureEncoder,
            LxmertXLayer,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_lxmert import (
            TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFLxmertForPreTraining,
            TFLxmertMainLayer,
            TFLxmertModel,
            TFLxmertPreTrainedModel,
            TFLxmertVisualFeatureEncoder,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
import argparse
import requests
import torch
from PIL import Image
from transformers import ViTMAEConfig, ViTMAEForPreTraining, ViTMAEImageProcessor
def __snake_case ( _lowercase ):
"""simple docstring"""
if "cls_token" in name:
UpperCamelCase = name.replace('''cls_token''' ,'''vit.embeddings.cls_token''' )
if "mask_token" in name:
UpperCamelCase = name.replace('''mask_token''' ,'''decoder.mask_token''' )
if "decoder_pos_embed" in name:
UpperCamelCase = name.replace('''decoder_pos_embed''' ,'''decoder.decoder_pos_embed''' )
if "pos_embed" in name and "decoder" not in name:
UpperCamelCase = name.replace('''pos_embed''' ,'''vit.embeddings.position_embeddings''' )
if "patch_embed.proj" in name:
UpperCamelCase = name.replace('''patch_embed.proj''' ,'''vit.embeddings.patch_embeddings.projection''' )
if "patch_embed.norm" in name:
UpperCamelCase = name.replace('''patch_embed.norm''' ,'''vit.embeddings.norm''' )
if "decoder_blocks" in name:
UpperCamelCase = name.replace('''decoder_blocks''' ,'''decoder.decoder_layers''' )
if "blocks" in name:
UpperCamelCase = name.replace('''blocks''' ,'''vit.encoder.layer''' )
if "attn.proj" in name:
UpperCamelCase = name.replace('''attn.proj''' ,'''attention.output.dense''' )
if "attn" in name:
UpperCamelCase = name.replace('''attn''' ,'''attention.self''' )
if "norm1" in name:
UpperCamelCase = name.replace('''norm1''' ,'''layernorm_before''' )
if "norm2" in name:
UpperCamelCase = name.replace('''norm2''' ,'''layernorm_after''' )
if "mlp.fc1" in name:
UpperCamelCase = name.replace('''mlp.fc1''' ,'''intermediate.dense''' )
if "mlp.fc2" in name:
UpperCamelCase = name.replace('''mlp.fc2''' ,'''output.dense''' )
if "decoder_embed" in name:
UpperCamelCase = name.replace('''decoder_embed''' ,'''decoder.decoder_embed''' )
if "decoder_norm" in name:
UpperCamelCase = name.replace('''decoder_norm''' ,'''decoder.decoder_norm''' )
if "decoder_pred" in name:
UpperCamelCase = name.replace('''decoder_pred''' ,'''decoder.decoder_pred''' )
if "norm.weight" in name and "decoder" not in name:
UpperCamelCase = name.replace('''norm.weight''' ,'''vit.layernorm.weight''' )
if "norm.bias" in name and "decoder" not in name:
UpperCamelCase = name.replace('''norm.bias''' ,'''vit.layernorm.bias''' )
return name
def __snake_case ( _lowercase ,_lowercase ):
"""simple docstring"""
for key in orig_state_dict.copy().keys():
UpperCamelCase = orig_state_dict.pop(_lowercase )
if "qkv" in key:
UpperCamelCase = key.split('''.''' )
UpperCamelCase = int(key_split[1] )
if "decoder_blocks" in key:
UpperCamelCase = config.decoder_hidden_size
UpperCamelCase = '''decoder.decoder_layers.'''
if "weight" in key:
UpperCamelCase = val[:dim, :]
UpperCamelCase = val[dim : dim * 2, :]
UpperCamelCase = val[-dim:, :]
elif "bias" in key:
UpperCamelCase = val[:dim]
UpperCamelCase = val[dim : dim * 2]
UpperCamelCase = val[-dim:]
else:
UpperCamelCase = config.hidden_size
UpperCamelCase = '''vit.encoder.layer.'''
if "weight" in key:
UpperCamelCase = val[:dim, :]
UpperCamelCase = val[dim : dim * 2, :]
UpperCamelCase = val[-dim:, :]
elif "bias" in key:
UpperCamelCase = val[:dim]
UpperCamelCase = val[dim : dim * 2]
UpperCamelCase = val[-dim:]
else:
UpperCamelCase = val
return orig_state_dict
def __snake_case ( _lowercase ,_lowercase ):
"""simple docstring"""
UpperCamelCase = ViTMAEConfig()
if "large" in checkpoint_url:
UpperCamelCase = 1024
UpperCamelCase = 4096
UpperCamelCase = 24
UpperCamelCase = 16
elif "huge" in checkpoint_url:
UpperCamelCase = 14
UpperCamelCase = 1280
UpperCamelCase = 5120
UpperCamelCase = 32
UpperCamelCase = 16
UpperCamelCase = ViTMAEForPreTraining(_lowercase )
UpperCamelCase = torch.hub.load_state_dict_from_url(_lowercase ,map_location='''cpu''' )['''model''']
UpperCamelCase = ViTMAEImageProcessor(size=config.image_size )
UpperCamelCase = convert_state_dict(_lowercase ,_lowercase )
model.load_state_dict(_lowercase )
model.eval()
UpperCamelCase = '''https://user-images.githubusercontent.com/11435359/147738734-196fd92f-9260-48d5-ba7e-bf103d29364d.jpg'''
UpperCamelCase = Image.open(requests.get(_lowercase ,stream=_lowercase ).raw )
UpperCamelCase = ViTMAEImageProcessor(size=config.image_size )
UpperCamelCase = image_processor(images=_lowercase ,return_tensors='''pt''' )
# forward pass
torch.manual_seed(2 )
UpperCamelCase = model(**_lowercase )
UpperCamelCase = outputs.logits
if "large" in checkpoint_url:
UpperCamelCase = torch.tensor(
[[-0.7309, -0.7128, -1.0169], [-1.0161, -0.9058, -1.1878], [-1.0478, -0.9411, -1.1911]] )
elif "huge" in checkpoint_url:
UpperCamelCase = torch.tensor(
[[-1.1599, -0.9199, -1.2221], [-1.1952, -0.9269, -1.2307], [-1.2143, -0.9337, -1.2262]] )
else:
UpperCamelCase = torch.tensor(
[[-0.9192, -0.8481, -1.1259], [-1.1349, -1.0034, -1.2599], [-1.1757, -1.0429, -1.2726]] )
# verify logits
assert torch.allclose(logits[0, :3, :3] ,_lowercase ,atol=1e-4 )
print(f'Saving model to {pytorch_dump_folder_path}' )
model.save_pretrained(_lowercase )
print(f'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(_lowercase )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://dl.fbaipublicfiles.com/mae/visualize/mae_visualize_vit_base.pth',
type=str,
help='URL of the checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
SCREAMING_SNAKE_CASE_ = parser.parse_args()
convert_vit_mae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path) | 34 | 0 |
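# Illustrative CLI invocation (the script filename and output path are placeholders):
#
#   python convert_vit_mae_to_pytorch.py \
#       --checkpoint_url https://dl.fbaipublicfiles.com/mae/visualize/mae_visualize_vit_base.pth \
#       --pytorch_dump_folder_path ./vit-mae-base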
def solution(limit: int = 1_000_000) -> int:
    """Count reduced proper fractions n/d with d <= limit via Euler's totient (Project Euler 72)."""
    primes = set(range(3, limit, 2))
    primes.add(2)
    for p in range(3, limit, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, limit, p)))

    phi = [float(n) for n in range(limit + 1)]

    for p in primes:
        for n in range(p, limit + 1, p):
            phi[n] *= 1 - 1 / p

    return int(sum(phi[2:]))


if __name__ == "__main__":
    print(f"{solution() = }")
"""simple docstring"""
import unittest
import torch
from torch import nn
from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory
def __snake_case ( ):
"""simple docstring"""
raise RuntimeError('''CUDA out of memory.''' )
class snake_case_ ( nn.Module ):
"""simple docstring"""
def __init__( self) -> Any:
super().__init__()
UpperCamelCase = nn.Linear(3 , 4)
UpperCamelCase = nn.BatchNormad(4)
UpperCamelCase = nn.Linear(4 , 5)
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Union[str, Any]:
return self.lineara(self.batchnorm(self.lineara(lowerCamelCase_)))
class snake_case_ ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase__ ( self) -> List[Any]:
UpperCamelCase = []
@find_executable_batch_size(starting_batch_size=1_2_8)
def mock_training_loop_function(lowerCamelCase_):
nonlocal batch_sizes
batch_sizes.append(lowerCamelCase_)
if batch_size != 8:
raise_fake_out_of_memory()
mock_training_loop_function()
self.assertListEqual(lowerCamelCase_ , [1_2_8, 6_4, 3_2, 1_6, 8])
def UpperCAmelCase__ ( self) -> Optional[Any]:
UpperCamelCase = []
@find_executable_batch_size(starting_batch_size=1_2_8)
def mock_training_loop_function(lowerCamelCase_ , lowerCamelCase_):
nonlocal batch_sizes
batch_sizes.append(lowerCamelCase_)
if batch_size != 8:
raise_fake_out_of_memory()
return batch_size, arga
UpperCamelCase , UpperCamelCase = mock_training_loop_function('''hello''')
self.assertListEqual(lowerCamelCase_ , [1_2_8, 6_4, 3_2, 1_6, 8])
self.assertListEqual([bs, arga] , [8, '''hello'''])
def UpperCAmelCase__ ( self) -> Tuple:
@find_executable_batch_size(starting_batch_size=0)
def mock_training_loop_function(lowerCamelCase_):
pass
with self.assertRaises(lowerCamelCase_) as cm:
mock_training_loop_function()
self.assertIn('''No executable batch size found, reached zero.''' , cm.exception.args[0])
def UpperCAmelCase__ ( self) -> List[Any]:
@find_executable_batch_size(starting_batch_size=1_6)
def mock_training_loop_function(lowerCamelCase_):
if batch_size > 0:
raise_fake_out_of_memory()
pass
with self.assertRaises(lowerCamelCase_) as cm:
mock_training_loop_function()
self.assertIn('''No executable batch size found, reached zero.''' , cm.exception.args[0])
def UpperCAmelCase__ ( self) -> Union[str, Any]:
@find_executable_batch_size(starting_batch_size=1_2_8)
def mock_training_loop_function(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_):
if batch_size != 8:
raise raise_fake_out_of_memory()
with self.assertRaises(lowerCamelCase_) as cm:
mock_training_loop_function(1_2_8 , '''hello''' , '''world''')
self.assertIn('''Batch size was passed into `f`''' , cm.exception.args[0])
self.assertIn('''`f(arg1=\'hello\', arg2=\'world\')''' , cm.exception.args[0])
def UpperCAmelCase__ ( self) -> Dict:
@find_executable_batch_size(starting_batch_size=1_6)
def mock_training_loop_function(lowerCamelCase_):
raise ValueError('''Oops, we had an error!''')
with self.assertRaises(lowerCamelCase_) as cm:
mock_training_loop_function()
self.assertIn('''Oops, we had an error!''' , cm.exception.args[0])
@require_cuda
def UpperCAmelCase__ ( self) -> Optional[int]:
UpperCamelCase = torch.cuda.memory_allocated()
UpperCamelCase = ModelForTest()
model.cuda()
self.assertGreater(torch.cuda.memory_allocated() , lowerCamelCase_)
UpperCamelCase = release_memory(lowerCamelCase_)
self.assertEqual(torch.cuda.memory_allocated() , lowerCamelCase_) | 34 | 0 |
from __future__ import annotations


def is_palindrome(n: int | str) -> bool:
    """Return True if n reads the same forwards and backwards."""
    n = str(n)
    return n == n[::-1]


def solution(n: int = 1_000_000) -> int:
    """Sum all numbers below n that are palindromic in base 10 and base 2 (Project Euler 36)."""
    total = 0
    for i in range(1, n):
        if is_palindrome(i) and is_palindrome(bin(i).split("b")[1]):
            total += i
    return total


if __name__ == "__main__":
    print(solution(int(str(input().strip()))))
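# Worked example (from the Project Euler 36 statement): 585 = 1001001001 in
# binary, so it is palindromic in both base 10 and base 2.
assert is_palindrome(585) and is_palindrome(bin(585).split("b")[1])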
"""simple docstring"""
from typing import Dict
from transformers import EvalPrediction, HfArgumentParser, TrainingArguments, is_torch_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
get_torch_dist_unique_port,
require_torch_multi_gpu,
require_torch_neuroncore,
)
from transformers.training_args import ParallelMode
from transformers.utils import logging
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
from transformers import Trainer
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
def __init__( self , lowerCamelCase_ = 1_0_1) -> Tuple:
UpperCamelCase = length
def __len__( self) -> List[str]:
return self.length
def __getitem__( self , lowerCamelCase_) -> int:
return i
class snake_case_ :
"""simple docstring"""
def __call__( self , lowerCamelCase_) -> str:
return {"input_ids": torch.tensor(lowerCamelCase_), "labels": torch.tensor(lowerCamelCase_)}
class snake_case_ ( nn.Module ):
"""simple docstring"""
def __init__( self) -> List[Any]:
super().__init__()
# Add some (unused) params otherwise DDP will complain.
UpperCamelCase = nn.Linear(1_2_0 , 8_0)
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_=None) -> Any:
if labels is not None:
return torch.tensor(0.0 , device=input_ids.device), input_ids
else:
return input_ids
class TestTrainerDistributedNeuronCore(TestCasePlus):
    """simple docstring"""
    @require_torch_neuroncore
    def test_trainer(self):
        distributed_args = F'--nproc_per_node=2\n            --master_port={get_torch_dist_unique_port()}\n            {self.test_file_dir}/test_trainer_distributed.py\n        '.split()
        output_dir = self.get_auto_remove_tmp_dir()
        args = F'--output_dir {output_dir}'.split()
        cmd = ['''torchrun'''] + distributed_args + args
        execute_subprocess_async(cmd, env=self.get_env())
        # successful return here == success - any errors would have caused an error in the sub-call
class TestTrainerDistributed(TestCasePlus):
    """simple docstring"""
    @require_torch_multi_gpu
    def test_trainer(self):
        distributed_args = F'--nproc_per_node={torch.cuda.device_count()}\n            --master_port={get_torch_dist_unique_port()}\n            {self.test_file_dir}/test_trainer_distributed.py\n        '.split()
        output_dir = self.get_auto_remove_tmp_dir()
        args = F'--output_dir {output_dir}'.split()
        cmd = ['''torchrun'''] + distributed_args + args
        execute_subprocess_async(cmd, env=self.get_env())
        # successful return here == success - any errors would have caused an error in the sub-call
# successful return here == success - any errors would have caused an error in the sub-call
if __name__ == "__main__":
# The script below is meant to be run under torch.distributed, on a machine with multiple GPUs:
#
# PYTHONPATH="src" python -m torch.distributed.run --nproc_per_node 2 --output_dir output_dir ./tests/test_trainer_distributed.py
    parser = HfArgumentParser((TrainingArguments,))
    training_args = parser.parse_args_into_dataclasses()[0]
logger.warning(
f'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, '
f'distributed training: {training_args.parallel_mode != ParallelMode.NOT_DISTRIBUTED}'
)
# Essentially, what we want to verify in the distributed case is that we get all samples back,
# in the right order. (this is crucial for prediction for instance)
for dataset_length in [101, 40, 7]:
        dataset = DummyDataset(dataset_length)
        def compute_metrics(p: EvalPrediction) -> Dict:
            """simple docstring"""
            sequential = list(range(len(dataset)))
            success = p.predictions.tolist() == sequential and p.label_ids.tolist() == sequential
            if not success and training_args.local_rank == 0:
                logger.warning(
                    '''Predictions and/or labels do not match expected results:\n  - predictions: '''
                    f'{p.predictions.tolist()}\n  - labels: {p.label_ids.tolist()}\n  - expected: {sequential}' )
            return {"success": success}
        trainer = Trainer(
model=DummyModel(),
args=training_args,
data_collator=DummyDataCollator(),
eval_dataset=dataset,
compute_metrics=compute_metrics,
)
        metrics = trainer.evaluate()
logger.info(metrics)
if metrics["eval_success"] is not True:
logger.error(metrics)
exit(1)
        p = trainer.predict(dataset)
logger.info(p.metrics)
if p.metrics["test_success"] is not True:
logger.error(p.metrics)
exit(1)
        trainer.args.eval_accumulation_steps = 2
        metrics = trainer.evaluate()
logger.info(metrics)
if metrics["eval_success"] is not True:
logger.error(metrics)
exit(1)
        p = trainer.predict(dataset)
logger.info(p.metrics)
if p.metrics["test_success"] is not True:
logger.error(p.metrics)
exit(1)
        trainer.args.eval_accumulation_steps = None | 34 | 0 |
from math import pow
def backtrack(
    needed_sum: int,
    power: int,
    current_number: int,
    current_sum: int,
    solutions_count: int,
) -> tuple[int, int]:
    """simple docstring"""
    if current_sum == needed_sum:
        # If the sum of the powers is equal to needed_sum, then we have a solution.
        solutions_count += 1
        return current_sum, solutions_count

    i_to_n = int(pow(current_number, power))
    if current_sum + i_to_n <= needed_sum:
        # If the sum of the powers is less than needed_sum, then continue adding powers.
        current_sum += i_to_n
        current_sum, solutions_count = backtrack(
            needed_sum, power, current_number + 1, current_sum, solutions_count
        )
        current_sum -= i_to_n
    if i_to_n < needed_sum:
        # If the power of i is less than needed_sum, then try with the next power.
        current_sum, solutions_count = backtrack(
            needed_sum, power, current_number + 1, current_sum, solutions_count
        )
    return current_sum, solutions_count
def solve(needed_sum: int, power: int) -> int:
    """simple docstring"""
    if not (1 <= needed_sum <= 1000 and 2 <= power <= 10):
        raise ValueError(
            """Invalid input\n"""
            """needed_sum must be between 1 and 1000, power between 2 and 10.""" )
    return backtrack(needed_sum, power, 1, 0, 0)[1]  # Return the solutions_count
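# Hedged example (illustrative addition, not in the original module): counting
# the ways to write 100 as a sum of distinct squares. The expected value 3
# corresponds to the sets {100}, {36, 64} and {1, 9, 16, 25, 49}.
def _demo_power_sum() -> int:
    return solve(100, 2)  # -> 3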
if __name__ == "__main__":
import doctest
doctest.testmod()
| 662 |
"""simple docstring"""
import argparse
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration
SCREAMING_SNAKE_CASE_ = [
# tf -> hf
('/', '.'),
('layer_', 'layers.'),
('kernel', 'weight'),
('beta', 'bias'),
('gamma', 'weight'),
('pegasus', 'model'),
]
SCREAMING_SNAKE_CASE_ = [
('.output.dense', '.fc2'),
('intermediate.LayerNorm', 'final_layer_norm'),
('intermediate.dense', 'fc1'),
]
SCREAMING_SNAKE_CASE_ = (
INIT_COMMON
+ [
('attention.self.LayerNorm', 'self_attn_layer_norm'),
('attention.output.dense', 'self_attn.out_proj'),
('attention.self', 'self_attn'),
('attention.encdec.LayerNorm', 'encoder_attn_layer_norm'),
('attention.encdec_output.dense', 'encoder_attn.out_proj'),
('attention.encdec', 'encoder_attn'),
('key', 'k_proj'),
('value', 'v_proj'),
('query', 'q_proj'),
('decoder.LayerNorm', 'decoder.layernorm_embedding'),
]
+ END_COMMON
)
SCREAMING_SNAKE_CASE_ = (
INIT_COMMON
+ [
('embeddings.word_embeddings', 'shared.weight'),
('embeddings.position_embeddings', 'embed_positions.weight'),
('attention.self.LayerNorm', 'self_attn_layer_norm'),
('attention.output.dense', 'self_attn.output'),
('attention.self', 'self_attn.self'),
('encoder.LayerNorm', 'encoder.layernorm_embedding'),
]
+ END_COMMON
)
SCREAMING_SNAKE_CASE_ = [
'encdec/key/bias',
'encdec/query/bias',
'encdec/value/bias',
'self/key/bias',
'self/query/bias',
'self/value/bias',
'encdec_output/dense/bias',
'attention/output/dense/bias',
]
def rename_state_dict_key(k, patterns):
    """simple docstring"""
    for tf_name, hf_name in patterns:
        k = k.replace(tf_name, hf_name)
    return k
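# Hedged illustration (added; not part of the original script): each pattern is a
# plain (tf_name, hf_name) substitution applied left to right, so a TF key such as
# 'pegasus/decoder/layer_0/self/query/kernel' is progressively rewritten via
# '/' -> '.', 'layer_' -> 'layers.', 'query' -> 'q_proj', 'kernel' -> 'weight', etc.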
def convert_bigbird_pegasus(tf_weights: dict, config_update: dict) -> BigBirdPegasusForConditionalGeneration:
    """simple docstring"""
    cfg = BigBirdPegasusConfig(**config_update)
    torch_model = BigBirdPegasusForConditionalGeneration(cfg)
    state_dict = torch_model.state_dict()
    mapping = {}
    # separating decoder weights
    decoder_weights = {k: tf_weights[k] for k in tf_weights if k.startswith('''pegasus/decoder''' )}
    remaining_weights = {k: tf_weights[k] for k in tf_weights if not k.startswith('''pegasus/decoder''' )}
    for k, v in tqdm(decoder_weights.items() ,'''tf -> hf conversion''' ):
        conditions = [k.endswith(ending) for ending in KEYS_TO_IGNORE]
        if any(conditions):
            continue
        patterns = DECODER_PATTERNS
        new_k = rename_state_dict_key(k, patterns)
        if new_k not in state_dict:
            raise ValueError(f'could not find new key {new_k} in state dict. (converted from {k})' )
        if any(True if i in k else False for i in ['''dense''', '''query''', '''key''', '''value'''] ):
            v = v.T
        mapping[new_k] = torch.from_numpy(v)
        assert v.shape == state_dict[new_k].shape, f'{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}'
    for k, v in tqdm(remaining_weights.items() ,'''tf -> hf conversion''' ):
        conditions = [k.endswith(ending) for ending in KEYS_TO_IGNORE]
        if any(conditions):
            continue
        patterns = REMAINING_PATTERNS
        new_k = rename_state_dict_key(k, patterns)
        if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings":
            raise ValueError(f'could not find new key {new_k} in state dict. (converted from {k})' )
        if any(True if i in k else False for i in ['''dense''', '''query''', '''key''', '''value'''] ):
            v = v.T
        mapping[new_k] = torch.from_numpy(v)
        if k != "pegasus/embeddings/position_embeddings":
            assert v.shape == state_dict[new_k].shape, f'{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}'
    mapping['''model.encoder.embed_positions.weight'''] = mapping['''model.embed_positions.weight''']
    mapping['''model.decoder.embed_positions.weight'''] = mapping.pop('''model.embed_positions.weight''' )
    missing, extra = torch_model.load_state_dict(mapping, strict=False)
    unexpected_missing = [
k
for k in missing
if k
not in [
'''final_logits_bias''',
'''model.encoder.embed_tokens.weight''',
'''model.decoder.embed_tokens.weight''',
'''lm_head.weight''',
]
]
assert unexpected_missing == [], f'no matches found for the following torch keys {unexpected_missing}'
assert extra == [], f'no matches found for the following tf keys {extra}'
return torch_model
def get_tf_weights_as_numpy(path) -> Dict:
    """simple docstring"""
    init_vars = tf.train.list_variables(path)
    tf_weights = {}
    ignore_name = ['''global_step''']
    for name, shape in tqdm(init_vars, desc='''converting tf checkpoint to dict''' ):
        skip_key = any(pat in name for pat in ignore_name)
        if skip_key:
            continue
        array = tf.train.load_variable(path, name)
        tf_weights[name] = array
    return tf_weights
def convert_bigbird_pegasus_ckpt_to_pytorch(ckpt_path, save_dir, config_update: dict):
    """simple docstring"""
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    torch_model = convert_bigbird_pegasus(tf_weights, config_update)
    torch_model.save_pretrained(save_dir)
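# Hedged CLI sketch (illustrative addition; the script filename and paths are
# placeholders, not taken from the original):
#   python convert_bigbird_pegasus_tf_to_pytorch.py \
#       --tf_ckpt_path /path/to/tf_ckpt --save_dir /path/to/hf_model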
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--tf_ckpt_path', type=str, help='passed to tf.train.list_variables')
parser.add_argument('--save_dir', default=None, type=str, help='Path to the output PyTorch model.')
    args = parser.parse_args()
    config_update = {}
convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update) | 34 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
AltDiffusionImgaImgPipeline,
AutoencoderKL,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class _a ( unittest.TestCase ):
"""simple docstring"""
    def tearDown(self):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
    def dummy_image(self):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)
        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image
    @property
    def dummy_cond_unet(self):
        torch.manual_seed(0)
        model = UNetaDConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D'), up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D'), cross_attention_dim=32, )
        return model
    @property
    def dummy_vae(self):
        torch.manual_seed(0)
        model = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'], up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'], latent_channels=4, )
        return model
    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = RobertaSeriesConfig(
            hidden_size=32, project_dim=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=5006, )
        return RobertaSeriesModelWithTransformation(config)
    @property
    def dummy_extractor(self):
        def extract(*args, **kwargs):
            class Out:
                """simple docstring"""
                def __init__(self):
                    self.pixel_values = torch.ones([0])
                def to(self, device):
                    self.pixel_values.to(device)
                    return self
            return Out()
        return extract
    def test_stable_diffusion_img2img_default_case(self):
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = XLMRobertaTokenizer.from_pretrained('hf-internal-testing/tiny-xlm-roberta')
        tokenizer.model_max_length = 77
        init_image = self.dummy_image.to(device)
        init_image = init_image / 2 + 0.5
        # make sure here that pndm scheduler skips prk
        alt_pipe = AltDiffusionImgaImgPipeline(
            unet=unet, scheduler=scheduler, vae=vae, text_encoder=bert, tokenizer=tokenizer, safety_checker=None, feature_extractor=self.dummy_extractor, )
        alt_pipe.image_processor = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor, do_normalize=False)
        alt_pipe = alt_pipe.to(device)
        alt_pipe.set_progress_bar_config(disable=None)
        prompt = 'A painting of a squirrel eating a burger'
        generator = torch.Generator(device=device).manual_seed(0)
        output = alt_pipe(
            [prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type='np', image=init_image, )
        image = output.images
        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = alt_pipe(
            [prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type='np', image=init_image, return_dict=False, )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.4427, 0.3731, 0.4249, 0.4941, 0.4546, 0.4148, 0.4193, 0.4666, 0.4499])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-3
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 5e-3
@unittest.skipIf(torch_device != 'cuda' , 'This test requires a GPU' )
    def test_stable_diffusion_img2img_fp16(self):
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = XLMRobertaTokenizer.from_pretrained('hf-internal-testing/tiny-xlm-roberta')
        tokenizer.model_max_length = 77
        init_image = self.dummy_image.to(torch_device)
        # put models in fp16
        unet = unet.half()
        vae = vae.half()
        bert = bert.half()
        # make sure here that pndm scheduler skips prk
        alt_pipe = AltDiffusionImgaImgPipeline(
            unet=unet, scheduler=scheduler, vae=vae, text_encoder=bert, tokenizer=tokenizer, safety_checker=None, feature_extractor=self.dummy_extractor, )
        alt_pipe.image_processor = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor, do_normalize=False)
        alt_pipe = alt_pipe.to(torch_device)
        alt_pipe.set_progress_bar_config(disable=None)
        prompt = 'A painting of a squirrel eating a burger'
        generator = torch.manual_seed(0)
        image = alt_pipe(
            [prompt], generator=generator, num_inference_steps=2, output_type='np', image=init_image, ).images
        assert image.shape == (1, 32, 32, 3)
@unittest.skipIf(torch_device != 'cuda' , 'This test requires a GPU' )
    def test_stable_diffusion_img2img_pipeline_multiple_of_8(self):
        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/img2img/sketch-mountains-input.jpg' )
        # resize to resolution that is divisible by 8 but not 16 or 32
        init_image = init_image.resize((760, 504))
        model_id = 'BAAI/AltDiffusion'
        pipe = AltDiffusionImgaImgPipeline.from_pretrained(
            model_id, safety_checker=None, )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        prompt = 'A fantasy landscape, trending on artstation'
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, image=init_image, strength=0.75, guidance_scale=7.5, generator=generator, output_type='np', )
        image = output.images[0]
        image_slice = image[255:258, 383:386, -1]
        assert image.shape == (504, 760, 3)
        expected_slice = np.array([0.9358, 0.9397, 0.9599, 0.9901, 1.0000, 1.0000, 0.9882, 1.0000, 1.0000])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch_gpu
class _a ( unittest.TestCase ):
"""simple docstring"""
    def tearDown(self):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_stable_diffusion_img2img_pipeline_default(self):
        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/img2img/sketch-mountains-input.jpg' )
        init_image = init_image.resize((768, 512))
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy' )
        model_id = 'BAAI/AltDiffusion'
        pipe = AltDiffusionImgaImgPipeline.from_pretrained(
            model_id, safety_checker=None, )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        prompt = 'A fantasy landscape, trending on artstation'
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, image=init_image, strength=0.75, guidance_scale=7.5, generator=generator, output_type='np', )
        image = output.images[0]
        assert image.shape == (512, 768, 3)
        # img2img is flaky across GPUs even in fp32, so using MAE here
        assert np.abs(expected_image - image).max() < 1e-2
| 23 |
"""simple docstring"""
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
def calculate_prob(text: str) -> None:
    """simple docstring"""
    single_char_strings, two_char_strings = analyze_text(text)
    my_alphas = list(''' ''' + ascii_lowercase)
    # what is our total sum of probabilities.
    all_sum = sum(single_char_strings.values())
    # one length string
    my_fir_sum = 0
    # for each alpha we go in our dict and if it is in it we calculate entropy
    for ch in my_alphas:
        if ch in single_char_strings:
            my_str = single_char_strings[ch]
            prob = my_str / all_sum
            my_fir_sum += prob * math.log2(prob)  # entropy formula.
    # print entropy
    print(f'{round(-1 * my_fir_sum ):.1f}' )
    # two len string
    all_sum = sum(two_char_strings.values())
    my_sec_sum = 0
    # for each alpha (two in size) calculate entropy.
    for cha in my_alphas:
        for chb in my_alphas:
            sequence = cha + chb
            if sequence in two_char_strings:
                my_str = two_char_strings[sequence]
                prob = int(my_str) / all_sum
                my_sec_sum += prob * math.log2(prob)
    # print second entropy
    print(f'{round(-1 * my_sec_sum ):.1f}' )
    # print the difference between them
    print(f'{round((-1 * my_sec_sum) - (-1 * my_fir_sum) ):.1f}' )
def analyze_text(text: str) -> tuple[dict, dict]:
    """simple docstring"""
    single_char_strings = Counter()  # type: ignore
    two_char_strings = Counter()  # type: ignore
    single_char_strings[text[-1]] += 1
    # first case when we have space at start.
    two_char_strings[" " + text[0]] += 1
    for i in range(0, len(text) - 1):
        single_char_strings[text[i]] += 1
        two_char_strings[text[i : i + 2]] += 1
    return single_char_strings, two_char_strings
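# Hedged example (illustrative addition, not in the original file): the frequency
# tables that feed the entropy estimate, shown on a tiny input.
def _demo_analyze_text() -> None:
    single, double = analyze_text('abab')
    assert single['a'] == 2 and single['b'] == 2
    assert double['ab'] == 2 and double['ba'] == 1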
def main() -> None:
"""simple docstring"""
import doctest
doctest.testmod()
# text = (
# "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
# "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
# "jointure saw horrible. He private he on be imagine suppose. Fertile "
# "beloved evident through no service elderly is. Blind there if every no so "
# "at. Own neglected you preferred way sincerity delivered his attempted. To "
# "of message cottage windows do besides against uncivil. Delightful "
# "unreserved impossible few estimating men favourable see entreaties. She "
# "propriety immediate was improving. He or entrance humoured likewise "
# "moderate. Much nor game son say feel. Fat make met can must form into "
# "gate. Me we offending prevailed discovery. "
# )
# calculate_prob(text)
if __name__ == "__main__":
main() | 34 | 0 |
import random
class Onepad:
    '''simple docstring'''
    @staticmethod
    def encrypt(text: str) -> tuple[list[int], list[int]]:
        plain = [ord(i) for i in text]
        cipher = []
        key = []
        for i in plain:
            k = random.randint(1, 300)
            c = (i + k) * k
            cipher.append(c)
            key.append(k)
        return cipher, key
    @staticmethod
    def decrypt(cipher: list[int], key: list[int]) -> str:
        plain = []
        for i in range(len(key)):
            p = int((cipher[i] - (key[i]) ** 2) / key[i])
            plain.append(chr(p))
        return "".join(plain)
if __name__ == "__main__":
lowercase , lowercase : Optional[int] = Onepad().encrypt('Hello')
print(c, k)
print(Onepad().decrypt(c, k)) | 557 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import DistilBertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.distilbert.modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
)
class FlaxDistilBertModelTester(unittest.TestCase):
    """simple docstring"""
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_attention_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_choices=4 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])
        config = DistilBertConfig(
            vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , tie_weights_=True , )
        return config, input_ids, attention_mask
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''attention_mask''': attention_mask}
        return config, inputs_dict
@require_flax
class FlaxDistilBertModelTest(FlaxModelTesterMixin , unittest.TestCase):
"""simple docstring"""
    all_model_classes = (
(
FlaxDistilBertModel,
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertForQuestionAnswering,
)
if is_flax_available()
else ()
)
    def setUp(self):
        self.model_tester = FlaxDistilBertModelTester(self)
    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained('''distilbert-base-uncased''')
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxDistilBertModelIntegrationTest(unittest.TestCase):
"""simple docstring"""
@slow
    def test_inference_no_head_absolute_embedding(self):
        model = FlaxDistilBertModel.from_pretrained('''distilbert-base-uncased''')
        input_ids = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids , attention_mask=attention_mask)[0]
        expected_shape = (1, 11, 768)
        self.assertEqual(output.shape , expected_shape)
        expected_slice = np.array([[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]])
        self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , expected_slice , atol=1e-4)) | 34 | 0 |
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
UpperCAmelCase_ : Any = logging.get_logger(__name__) # pylint: disable=invalid-name
class SpeechToImagePipeline(DiffusionPipeline ):
    def __init__( self , speech_model , speech_processor , vae , text_encoder , tokenizer , unet , scheduler , safety_checker , feature_extractor , ):
        super().__init__()
        if safety_checker is None:
            logger.warning(
                F"""You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"""
                " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
                " results in services or applications open to the public. Both the diffusers team and Hugging Face"
                " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
                " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
                " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." )
        self.register_modules(
            speech_model=speech_model , speech_processor=speech_processor , vae=vae , text_encoder=text_encoder , tokenizer=tokenizer , unet=unet , scheduler=scheduler , feature_extractor=feature_extractor , )
    def enable_attention_slicing( self , slice_size = "auto" ):
        if slice_size == "auto":
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size )
    def disable_attention_slicing( self ):
        self.enable_attention_slicing(None )
@torch.no_grad()
    def __call__( self , audio , sampling_rate=16_000 , height = 512 , width = 512 , num_inference_steps = 50 , guidance_scale = 7.5 , negative_prompt = None , num_images_per_prompt = 1 , eta = 0.0 , generator = None , latents = None , output_type = "pil" , return_dict = True , callback = None , callback_steps = 1 , **kwargs , ):
        inputs = self.speech_processor.feature_extractor(
            audio , return_tensors="pt" , sampling_rate=sampling_rate ).input_features.to(self.device )
        predicted_ids = self.speech_model.generate(inputs , max_length=480_000 )
        prompt = self.speech_processor.tokenizer.batch_decode(predicted_ids , skip_special_tokens=True , normalize=True )[
            0
        ]
        if isinstance(prompt , str ):
            batch_size = 1
        elif isinstance(prompt , list ):
            batch_size = len(prompt )
        else:
            raise ValueError(F"""`prompt` has to be of type `str` or `list` but is {type(prompt )}""" )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F"""`height` and `width` have to be divisible by 8 but are {height} and {width}.""" )
        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps , int ) or callback_steps <= 0)
        ):
            raise ValueError(
                F"""`callback_steps` has to be a positive integer but is {callback_steps} of type"""
                F""" {type(callback_steps )}.""" )
# get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt , padding="max_length" , max_length=self.tokenizer.model_max_length , return_tensors="pt" , )
        text_input_ids = text_inputs.input_ids
        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                F""" {self.tokenizer.model_max_length} tokens: {removed_text}""" )
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
        text_embeddings = self.text_encoder(text_input_ids.to(self.device ) )[0]
        # duplicate text embeddings for each generation per prompt, using mps friendly method
        bs_embed, seq_len, _ = text_embeddings.shape
        text_embeddings = text_embeddings.repeat(1 , num_images_per_prompt , 1 )
        text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt , seq_len , -1 )
        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [""] * batch_size
            elif type(prompt ) is not type(negative_prompt ):
                raise TypeError(
                    F"""`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt )} !="""
                    F""" {type(prompt )}.""" )
            elif isinstance(negative_prompt , str ):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt ):
                raise ValueError(
                    F"""`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt )}, but `prompt`:"""
                    F""" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"""
                    " the batch size of `prompt`." )
            else:
                uncond_tokens = negative_prompt
            max_length = text_input_ids.shape[-1]
            uncond_input = self.tokenizer(
                uncond_tokens , padding="max_length" , max_length=max_length , truncation=True , return_tensors="pt" , )
            uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = uncond_embeddings.shape[1]
            uncond_embeddings = uncond_embeddings.repeat(1 , num_images_per_prompt , 1 )
            uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt , seq_len , -1 )
            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            text_embeddings = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
        latents_shape = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
        latents_dtype = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not exist on mps
                latents = torch.randn(latents_shape , generator=generator , device="cpu" , dtype=latents_dtype ).to(
                    self.device )
            else:
                latents = torch.randn(latents_shape , generator=generator , device=self.device , dtype=latents_dtype )
        else:
            if latents.shape != latents_shape:
                raise ValueError(F"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" )
            latents = latents.to(self.device )
# set timesteps
        self.scheduler.set_timesteps(num_inference_steps )
        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        timesteps_tensor = self.scheduler.timesteps.to(self.device )
        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
A__ = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
A__ = {}
if accepts_eta:
A__ = eta
        for i, t in enumerate(self.progress_bar(timesteps_tensor ) ):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
            latent_model_input = self.scheduler.scale_model_input(latent_model_input , t )
            # predict the noise residual
            noise_pred = self.unet(latent_model_input , t , encoder_hidden_states=text_embeddings ).sample
            # perform guidance
            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2 )
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred , t , latents , **extra_step_kwargs ).prev_sample
            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i , t , latents )
        latents = 1 / 0.18_215 * latents
        image = self.vae.decode(latents ).sample
        image = (image / 2 + 0.5).clamp(0 , 1 )
        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        image = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image )
        if not return_dict:
            return image
        return StableDiffusionPipelineOutput(images=image , nsfw_content_detected=None )
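# Hedged usage sketch (illustrative addition; the checkpoint loading and audio
# input are assumptions, not part of this file):
#   pipe = SpeechToImagePipeline.from_pretrained(...)  # assembled from Whisper + Stable Diffusion parts
#   image = pipe(raw_audio, sampling_rate=16_000).images[0]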
| 491 |
"""simple docstring"""
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
@add_end_docstrings(lowerCamelCase_ )
class ZeroShotImageClassificationPipeline(Pipeline ):
"""simple docstring"""
    def __init__( self , **kwargs) -> Tuple:
        super().__init__(**kwargs)
        requires_backends(self , '''vision''')
        self.check_model_type(
            TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == '''tf'''
            else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING)
    def __call__( self , images , **kwargs):
        return super().__call__(images , **kwargs)
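    # Hedged call sketch (illustrative addition; the checkpoint id is an example,
    # not mandated by this file):
    #   pipe = pipeline("zero-shot-image-classification", model="openai/clip-vit-base-patch32")
    #   pipe("cat.png", candidate_labels=["cat", "dog"])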
    def _sanitize_parameters( self , **kwargs):
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params['''candidate_labels'''] = kwargs['''candidate_labels''']
        if "hypothesis_template" in kwargs:
            preprocess_params['''hypothesis_template'''] = kwargs['''hypothesis_template''']
        return preprocess_params, {}, {}
    def preprocess( self , image , candidate_labels=None , hypothesis_template="This is a photo of {}."):
        image = load_image(image)
        inputs = self.image_processor(images=[image] , return_tensors=self.framework)
        inputs['''candidate_labels'''] = candidate_labels
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences , return_tensors=self.framework , padding=True)
        inputs['''text_inputs'''] = [text_inputs]
        return inputs
    def _forward( self , model_inputs):
        candidate_labels = model_inputs.pop('''candidate_labels''')
        text_inputs = model_inputs.pop('''text_inputs''')
        if isinstance(text_inputs[0] , UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]
        outputs = self.model(**text_inputs , **model_inputs)
        model_outputs = {
            '''candidate_labels''': candidate_labels,
            '''logits''': outputs.logits_per_image,
        }
        return model_outputs
    def postprocess( self , model_outputs):
        candidate_labels = model_outputs.pop('''candidate_labels''')
        logits = model_outputs['''logits'''][0]
        if self.framework == "pt":
            probs = logits.softmax(dim=-1).squeeze(-1)
            scores = probs.tolist()
            if not isinstance(scores , list):
                scores = [scores]
        elif self.framework == "tf":
            probs = stable_softmax(logits , axis=-1)
            scores = probs.numpy().tolist()
        else:
            raise ValueError(F'Unsupported framework: {self.framework}')
        result = [
            {'''score''': score, '''label''': candidate_label}
            for score, candidate_label in sorted(zip(scores , candidate_labels) , key=lambda x: -x[0])
        ]
        return result | 34 | 0 |
import argparse
import struct
import unittest
class SHA256:
    """simple docstring"""
    def __init__( self , data ):
        '''simple docstring'''
        self.data = data
        # Initialize hash values
        self.hashes = [
0X6A09E667,
0XBB67AE85,
0X3C6EF372,
0XA54FF53A,
0X510E527F,
0X9B05688C,
0X1F83D9AB,
0X5BE0CD19,
]
# Initialize round constants
        self.round_constants = [
0X428A2F98,
0X71374491,
0XB5C0FBCF,
0XE9B5DBA5,
0X3956C25B,
0X59F111F1,
0X923F82A4,
0XAB1C5ED5,
0XD807AA98,
0X12835B01,
0X243185BE,
0X550C7DC3,
0X72BE5D74,
0X80DEB1FE,
0X9BDC06A7,
0XC19BF174,
0XE49B69C1,
0XEFBE4786,
0X0FC19DC6,
0X240CA1CC,
0X2DE92C6F,
0X4A7484AA,
0X5CB0A9DC,
0X76F988DA,
0X983E5152,
0XA831C66D,
0XB00327C8,
0XBF597FC7,
0XC6E00BF3,
0XD5A79147,
0X06CA6351,
0X14292967,
0X27B70A85,
0X2E1B2138,
0X4D2C6DFC,
0X53380D13,
0X650A7354,
0X766A0ABB,
0X81C2C92E,
0X92722C85,
0XA2BFE8A1,
0XA81A664B,
0XC24B8B70,
0XC76C51A3,
0XD192E819,
0XD6990624,
0XF40E3585,
0X106AA070,
0X19A4C116,
0X1E376C08,
0X2748774C,
0X34B0BCB5,
0X391C0CB3,
0X4ED8AA4A,
0X5B9CCA4F,
0X682E6FF3,
0X748F82EE,
0X78A5636F,
0X84C87814,
0X8CC70208,
0X90BEFFFA,
0XA4506CEB,
0XBEF9A3F7,
0XC67178F2,
]
        self.preprocessed_data = self.preprocessing(self.data )
        self.final_hash()
    @staticmethod
    def preprocessing( data ):
        '''simple docstring'''
        padding = b"""\x80""" + (b"""\x00""" * (63 - (len(data ) + 8) % 64))
        big_endian_integer = struct.pack(""">Q""" , (len(data ) * 8) )
        return data + padding + big_endian_integer
    def final_hash( self ):
        '''simple docstring'''
        self.blocks = [
            self.preprocessed_data[x : x + 64]
            for x in range(0 , len(self.preprocessed_data ) , 64 )
        ]
        for block in self.blocks:
            # Convert the given block into a list of 4 byte integers
            words = list(struct.unpack(""">16L""" , block ) )
            # add 48 0-ed integers
            words += [0] * 48
            a, b, c, d, e, f, g, h = self.hashes
            for index in range(0 , 64 ):
                if index > 15:
                    # modify the zero-ed indexes at the end of the array
                    s0 = (
                        self.ror(words[index - 15] , 7 )
                        ^ self.ror(words[index - 15] , 18 )
                        ^ (words[index - 15] >> 3)
                    )
                    s1 = (
                        self.ror(words[index - 2] , 17 )
                        ^ self.ror(words[index - 2] , 19 )
                        ^ (words[index - 2] >> 10)
                    )
                    words[index] = (
                        words[index - 16] + s0 + words[index - 7] + s1
                    ) % 0x100000000
                # Compression
                s1 = self.ror(e , 6 ) ^ self.ror(e , 11 ) ^ self.ror(e , 25 )
                ch = (e & f) ^ ((~e & 0xFFFFFFFF) & g)
                temp1 = (
                    h + s1 + ch + self.round_constants[index] + words[index]
                ) % 0x100000000
                s0 = self.ror(a , 2 ) ^ self.ror(a , 13 ) ^ self.ror(a , 22 )
                maj = (a & b) ^ (a & c) ^ (b & c)
                temp2 = (s0 + maj) % 0x100000000
                h, g, f, e, d, c, b, a = (
                    g,
                    f,
                    e,
                    ((d + temp1) % 0x100000000),
                    c,
                    b,
                    a,
                    ((temp1 + temp2) % 0x100000000),
                )
            mutated_hash_values = [a, b, c, d, e, f, g, h]
            # Modify final values
            self.hashes = [
                ((element + mutated_hash_values[index]) % 0x100000000)
                for index, element in enumerate(self.hashes )
            ]
        self.hash = """""".join([hex(value )[2:].zfill(8 ) for value in self.hashes] )
    def ror( self , value , rotations ):
        '''simple docstring'''
        return 0xFFFFFFFF & (value << (32 - rotations)) | (value >> rotations)
class SHA256HashTest(unittest.TestCase ):
    """simple docstring"""
    def test_match_hashes( self ):
        '''simple docstring'''
        import hashlib
        data = bytes("""Test String""" , """utf-8""" )
        self.assertEqual(SHA256(data ).hash , hashlib.sha256(data ).hexdigest() )
def main():
    """simple docstring"""
    import doctest
    doctest.testmod()
    parser = argparse.ArgumentParser()
    parser.add_argument(
        """-s""" ,"""--string""" ,dest="""input_string""" ,default="""Hello World!! Welcome to Cryptography""" ,help="""Hash the string""" ,)
    parser.add_argument(
        """-f""" ,"""--file""" ,dest="""input_file""" ,help="""Hash contents of a file""" )
    args = parser.parse_args()
    input_string = args.input_string
    # hash input should be a bytestring
    if args.input_file:
        with open(args.input_file ,"""rb""" ) as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string ,"""utf-8""" )
    print(SHA256(hash_input ).hash )
if __name__ == "__main__":
main()
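    # Hedged sanity check (illustrative addition): the pure-Python digest agrees
    # with hashlib for any byte string, e.g.
    #   import hashlib
    #   assert SHA256(b"abc").hash == hashlib.sha256(b"abc").hexdigest()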
| 398 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class snake_case_ ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , unittest.TestCase ):
"""simple docstring"""
A_ = StableDiffusionInpaintPipeline
A_ = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
A_ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
A_ = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
A_ = frozenset([] )
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=9 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=True , )
        scheduler = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , sample_size=128 , )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act='''gelu''' , projection_dim=512 , )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''')
        components = {
            '''unet''': unet,
            '''scheduler''': scheduler,
            '''vae''': vae,
            '''text_encoder''': text_encoder,
            '''tokenizer''': tokenizer,
            '''safety_checker''': None,
            '''feature_extractor''': None,
        }
        return components
    def get_dummy_inputs(self , device , seed=0):
        # TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched
        image = floats_tensor((1, 3, 32, 32) , rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0 , 2 , 3 , 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert('''RGB''').resize((64, 64))
        mask_image = Image.fromarray(np.uint8(image + 4)).convert('''RGB''').resize((64, 64))
        if str(device).startswith('''mps'''):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            '''prompt''': '''A painting of a squirrel eating a burger''',
            '''image''': init_image,
            '''mask_image''': mask_image,
            '''generator''': generator,
            '''num_inference_steps''': 2,
            '''guidance_scale''': 6.0,
            '''output_type''': '''numpy''',
        }
        return inputs
    def test_stable_diffusion_inpaint(self):
        device = '''cpu'''  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInpaintPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
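# Hedged usage sketch (illustrative addition, not part of the test suite): the
# slow tests below reduce to a call of the form
#   pipe = StableDiffusionInpaintPipeline.from_pretrained("stabilityai/stable-diffusion-2-inpainting")
#   image = pipe(prompt=prompt, image=init_image, mask_image=mask_image).images[0]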
@slow
@require_torch_gpu
class snake_case_ ( unittest.TestCase ):
"""simple docstring"""
    def tearDown(self):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_stable_diffusion_inpaint_pipeline(self):
        init_image = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/sd2-inpaint/init_image.png''')
        mask_image = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''')
        expected_image = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'''
            '''/yellow_cat_sitting_on_a_park_bench.npy''')
        model_id = '''stabilityai/stable-diffusion-2-inpainting'''
        pipe = StableDiffusionInpaintPipeline.from_pretrained(model_id , safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        prompt = '''Face of a yellow cat, high resolution, sitting on a park bench'''
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt , image=init_image , mask_image=mask_image , generator=generator , output_type='''np''' , )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 9e-3
    def test_stable_diffusion_inpaint_pipeline_fp16(self):
        init_image = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/sd2-inpaint/init_image.png''')
        mask_image = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''')
        expected_image = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'''
            '''/yellow_cat_sitting_on_a_park_bench_fp16.npy''')
        model_id = '''stabilityai/stable-diffusion-2-inpainting'''
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id , torch_dtype=torch.float16 , safety_checker=None , )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        prompt = '''Face of a yellow cat, high resolution, sitting on a park bench'''
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt , image=init_image , mask_image=mask_image , generator=generator , output_type='''np''' , )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 5e-1
    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        init_image = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/sd2-inpaint/init_image.png''')
        mask_image = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''')
        model_id = '''stabilityai/stable-diffusion-2-inpainting'''
        scheduler = PNDMScheduler.from_pretrained(model_id , subfolder='''scheduler''')
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id , safety_checker=None , scheduler=scheduler , torch_dtype=torch.float16 , )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()
        prompt = '''Face of a yellow cat, high resolution, sitting on a park bench'''
        generator = torch.manual_seed(0)
        _ = pipe(
            prompt=prompt , image=init_image , mask_image=mask_image , generator=generator , num_inference_steps=2 , output_type='''np''' , )
        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.65 GB is allocated
        assert mem_bytes < 2.65 * 1_0**9 | 34 | 0 |
'''simple docstring'''
from __future__ import annotations
import unittest
import numpy as np
from transformers import OPTConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import GPTaTokenizer, TFOPTForCausalLM, TFOPTModel
def prepare_opt_inputs_dict(config, input_ids, attention_mask=None, head_mask=None):
    """simple docstring"""
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id ), tf.int8 )
    return {"input_ids": input_ids, "attention_mask": attention_mask}
@require_tf
class TFOPTModelTester:
    config_cls = OPTConfig
    config_updates = {}
    hidden_act = 'gelu'
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_labels=False , vocab_size=99 , hidden_size=16 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=4 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=20 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , embed_dim=16 , word_embed_proj_dim=16 , ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.embed_dim = embed_dim
        self.word_embed_proj_dim = word_embed_proj_dim
        self.is_encoder_decoder = False
    def prepare_config_and_inputs( self ):
        """simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
        input_ids = tf.concat([input_ids, eos_tensor] , axis=1 )
        config = self.config_cls(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , embed_dim=self.embed_dim , word_embed_proj_dim=self.word_embed_proj_dim , is_encoder_decoder=False , **self.config_updates , )
        inputs_dict = prepare_opt_inputs_dict(config , input_ids )
        return config, inputs_dict
    def check_decoder_model_past_large_inputs( self , config , inputs_dict ):
        """simple docstring"""
        model = TFOPTModel(config=config )
        input_ids = inputs_dict["""input_ids"""]
        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["""attention_mask"""][:1, :]
        self.batch_size = 1
        # first forward pass
        outputs = model(input_ids , attention_mask=attention_mask , use_cache=True )
        output, past_key_values = outputs.to_tuple()
        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3) , config.vocab_size )
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.int8 )
        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens] , axis=-1 )
        next_attention_mask = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
        output_from_no_past = model(next_input_ids , attention_mask=next_attention_mask )[0]
        output_from_past = model(next_tokens , attention_mask=next_attention_mask , past_key_values=past_key_values )[0]
        self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
        # select random slice
        random_slice_idx = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]
        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_no_past_slice , output_from_past_slice , rtol=1e-3 )
@require_tf
class TFOPTModelTest(TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (TFOPTModel, TFOPTForCausalLM) if is_tf_available() else ()
    all_generative_model_classes = (TFOPTForCausalLM,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {'feature-extraction': TFOPTModel, 'text-generation': TFOPTForCausalLM} if is_tf_available() else {}
    )
    is_encoder_decoder = False
    test_pruning = False
    test_onnx = False
    onnx_min_opset = 10
    def setUp( self ):
        """simple docstring"""
        self.model_tester = TFOPTModelTester(self )
        self.config_tester = ConfigTester(self , config_class=OPTConfig )
    def test_config( self ):
        """simple docstring"""
        self.config_tester.run_common_tests()
    def test_decoder_model_past_large_inputs( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs )
    def test_resize_token_embeddings( self ):
        """simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        def _get_word_embedding_weight(model , embedding_layer ):
            if hasattr(embedding_layer , """weight""" ):
                return embedding_layer.weight
            else:
                # Here we build the word embeddings weights if not exists.
                # And then we retry to get the attribute once built.
                model.build()
                if hasattr(embedding_layer , """weight""" ):
                    return embedding_layer.weight
                else:
                    return None
        for model_class in self.all_model_classes:
            for size in [config.vocab_size - 10, config.vocab_size + 10]:
                # build the embeddings
                model = model_class(config=config )
                old_input_embeddings = _get_word_embedding_weight(model , model.get_input_embeddings() )
                old_output_embeddings = _get_word_embedding_weight(model , model.get_output_embeddings() )
                # reshape the embeddings
                model.resize_token_embeddings(size )
                new_input_embeddings = _get_word_embedding_weight(model , model.get_input_embeddings() )
                new_output_embeddings = _get_word_embedding_weight(model , model.get_output_embeddings() )
                # check that the resized embeddings size matches the desired size.
                assert_size = size if size is not None else config.vocab_size
                self.assertEqual(new_input_embeddings.shape[0] , assert_size )
                # check that weights remain the same after resizing
                models_equal = True
                for p1, p2 in zip(old_input_embeddings.value() , new_input_embeddings.value() ):
                    if tf.math.reduce_sum(tf.math.abs(p1 - p2 ) ) > 0:
                        models_equal = False
                self.assertTrue(models_equal )
                if old_output_embeddings is not None and new_output_embeddings is not None:
                    self.assertEqual(new_output_embeddings.shape[0] , assert_size )
                    models_equal = True
                    for p1, p2 in zip(old_output_embeddings.value() , new_output_embeddings.value() ):
                        if tf.math.reduce_sum(tf.math.abs(p1 - p2 ) ) > 0:
                            models_equal = False
                    self.assertTrue(models_equal )
def _long_tensor(tok_lst ) -> Optional[Any]:
    """simple docstring"""
    return tf.constant(tok_lst, dtype=tf.int32 )
@require_tf
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    vocab_size = 9_9
    def _get_config_and_data( self : Optional[int] ):
        """simple docstring"""
        eos_column_vector = tf.ones((4, 1) , dtype=tf.int32 ) * 2
        input_ids = tf.concat([ids_tensor((4, 6) , self.vocab_size - 3 ) + 3, eos_column_vector] , axis=1 )
        batch_size = input_ids.shape[0]
        config = OPTConfig(
            vocab_size=self.vocab_size , hidden_size=24 , num_hidden_layers=2 , num_attention_heads=2 , ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
        return config, input_ids, batch_size
@require_sentencepiece
@require_tf
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@slow
    def test_inference_no_head( self : str ):
        """simple docstring"""
        model = TFOPTModel.from_pretrained("""facebook/opt-350m""" )
        input_ids = _long_tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]] )
        attention_mask = tf.not_equal(input_ids , model.config.pad_token_id )
        with tf.GradientTape():
            output = model(input_ids=input_ids , attention_mask=attention_mask ).last_hidden_state
        expected_shape = (1, 11, 512)
        self.assertEqual(output.shape , expected_shape )
        expected_slice = tf.constant(
            [[-0.2873, -1.9218, -0.3033], [-1.2710, -0.1338, -0.1902], [0.4095, 0.1214, -1.3121]] )
        self.assertTrue(np.allclose(output[:, :3, :3] , expected_slice , atol=4e-3 ) )
        xla_generate = tf.function(model , jit_compile=True )
        output = xla_generate(input_ids , attention_mask )[0]
        self.assertTrue(np.allclose(output[:, :3, :3] , expected_slice , atol=4e-2 ) )
@require_tf
@slow
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    def setUp( self : str ):
        """simple docstring"""
        super().setUp()
        self.path_model = """facebook/opt-350m"""
    def test_logits( self : Tuple ):
        """simple docstring"""
        model = TFOPTForCausalLM.from_pretrained(self.path_model )
        tokenizer = GPTaTokenizer.from_pretrained(self.path_model )
        prompts = [
            """Today is a beautiful day and I want to""",
            """In the city of""",
            """Paris is the capital of France and""",
            """Computers and mobile phones have taken""",
        ]
        # verify that prompt without BOS token is identical to Metaseq -> add_special_tokens=False
        inputs = tokenizer(prompts , return_tensors="""tf""" , padding=True , add_special_tokens=False )
        logits = tf.math.reduce_mean(model(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 )
        logits_meta = tf.constant(
            [
                [1.3851, -13.8923, -10.5229, -10.7533, -0.2309, -10.2384, -0.5365, -9.0947, -5.1670],
                [-4.7073, -10.6276, -3.9415, -21.5242, -0.2822, -0.2822, -0.2822, -0.2822, -0.2822],
                [0.6247, -3.4229, -8.9179, -1.4297, -14.1650, 1.4146, -9.0218, -0.2703, -0.2703],
                [6.4783, -1.9913, -10.7926, -2.3336, 1.5092, -0.9974, -6.8213, 1.3477, 1.3477],
            ] )
        self.assertTrue(np.allclose(logits , logits_meta , atol=1e-4 ) )
        xla_generate = tf.function(model , jit_compile=True )
        logits = tf.math.reduce_mean(xla_generate(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 )
        self.assertTrue(np.allclose(logits , logits_meta , atol=1e-4 ) )
@require_tf
@slow
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@property
    def prompts( self : Any ):
"""simple docstring"""
return [
"Today is a beautiful day and I want",
"In the city of",
"Paris is the capital of France and",
"Computers and mobile phones have taken",
]
    def test_generation_pre_attn_layer_norm( self : Optional[int] ):
        """simple docstring"""
        model_id = """facebook/opt-125m"""
        EXPECTED_OUTPUTS = [
            """Today is a beautiful day and I want to""",
            """In the city of New York, the city""",
            """Paris is the capital of France and the capital""",
            """Computers and mobile phones have taken over the""",
        ]
        predicted_outputs = []
        tokenizer = GPTaTokenizer.from_pretrained(model_id )
        model = TFOPTForCausalLM.from_pretrained(model_id )
        for prompt in self.prompts:
            input_ids = tokenizer(prompt , return_tensors="""tf""" ).input_ids
            generated_ids = model.generate(input_ids , max_length=10 )
            generated_string = tokenizer.batch_decode(generated_ids , skip_special_tokens=True )
            predicted_outputs += generated_string
        self.assertListEqual(predicted_outputs , EXPECTED_OUTPUTS )
    def test_batch_generation( self : Optional[int] ):
        """simple docstring"""
        model_id = """facebook/opt-350m"""
        tokenizer = GPTaTokenizer.from_pretrained(model_id )
        model = TFOPTForCausalLM.from_pretrained(model_id )
        tokenizer.padding_side = """left"""
        # use different length sentences to test batching
        sentences = [
            """Hello, my dog is a little""",
            """Today, I""",
        ]
        inputs = tokenizer(sentences , return_tensors="""tf""" , padding=True )
        input_ids = inputs["""input_ids"""]
        outputs = model.generate(input_ids=input_ids , attention_mask=inputs["""attention_mask"""] )
        inputs_non_padded = tokenizer(sentences[0] , return_tensors="""tf""" ).input_ids
        output_non_padded = model.generate(input_ids=inputs_non_padded )
        num_paddings = inputs_non_padded.shape[-1] - tf.math.reduce_sum(
            tf.cast(inputs["""attention_mask"""][-1] , tf.int64 ) )
        inputs_padded = tokenizer(sentences[1] , return_tensors="""tf""" ).input_ids
        output_padded = model.generate(input_ids=inputs_padded , max_length=model.config.max_length - num_paddings )
        batch_out_sentence = tokenizer.batch_decode(outputs , skip_special_tokens=True )
        non_padded_sentence = tokenizer.decode(output_non_padded[0] , skip_special_tokens=True )
        padded_sentence = tokenizer.decode(output_padded[0] , skip_special_tokens=True )
        expected_output_sentence = [
            """Hello, my dog is a little bit of a dork.\nI\'m a little bit""",
            """Today, I was in the middle of a conversation with a friend about the""",
        ]
        self.assertListEqual(expected_output_sentence , batch_out_sentence )
        self.assertListEqual(batch_out_sentence , [non_padded_sentence, padded_sentence] )
    def test_generation_post_attn_layer_norm( self : Dict ):
        """simple docstring"""
        model_id = """facebook/opt-350m"""
        EXPECTED_OUTPUTS = [
            """Today is a beautiful day and I want to""",
            """In the city of San Francisco, the city""",
            """Paris is the capital of France and the capital""",
            """Computers and mobile phones have taken over the""",
        ]
        predicted_outputs = []
        tokenizer = GPTaTokenizer.from_pretrained(model_id )
        model = TFOPTForCausalLM.from_pretrained(model_id )
        for prompt in self.prompts:
            input_ids = tokenizer(prompt , return_tensors="""tf""" ).input_ids
            generated_ids = model.generate(input_ids , max_length=10 )
            generated_string = tokenizer.batch_decode(generated_ids , skip_special_tokens=True )
            predicted_outputs += generated_string
        self.assertListEqual(predicted_outputs , EXPECTED_OUTPUTS )
| 150 |
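The batching tests above depend on left padding: a decoder-only model continues generation from the last position of each row, so that position must hold a real token, not a pad. A minimal sketch of the convention (any GPT-2-style checkpoint would do; network access is assumed):

from transformers import GPT2Tokenizer

tok = GPT2Tokenizer.from_pretrained("gpt2")
tok.padding_side = "left"
tok.pad_token = tok.eos_token  # GPT-2 ships without a pad token

batch = tok(["Hello, my dog is a little", "Today, I"], padding=True, return_tensors="tf")
# Pads now sit at the front of the shorter row, so every row of the
# batch ends in a real token and generation picks up cleanly.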
"""simple docstring"""
import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch
import pyarrow as pa
import pytest
import requests
from packaging import version
from datasets import config
if config.PY_VERSION < version.parse('3.8'):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
def parse_flag_from_env(key ,default=False ):
    """simple docstring"""
    try:
        _value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(_value )
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f'If set, {key} must be yes or no.' )
    return _value
_run_slow_tests = parse_flag_from_env('RUN_SLOW', default=False)
_run_remote_tests = parse_flag_from_env('RUN_REMOTE', default=False)
_run_local_tests = parse_flag_from_env('RUN_LOCAL', default=True)
_run_packaged_tests = parse_flag_from_env('RUN_PACKAGED', default=True)
# Compression
require_lz4 = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason='test requires lz4')
require_py7zr = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason='test requires py7zr')
require_zstandard = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason='test requires zstandard')
# Audio
require_sndfile = pytest.mark.skipif(
    # On Windows and OS X, soundfile installs sndfile
    find_spec('soundfile') is None or version.parse(importlib_metadata.version('soundfile')) < version.parse('0.12.0'),
    reason='test requires sndfile>=0.12.1: \'pip install "soundfile>=0.12.1"\'; ',
)
# Beam
require_beam = pytest.mark.skipif(
    not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse('0.3.2'),
    reason='test requires apache-beam and a compatible dill version',
)
# Dill-cloudpickle compatibility
require_dill_gt_0_3_2 = pytest.mark.skipif(
    config.DILL_VERSION <= version.parse('0.3.2'),
    reason='test requires dill>0.3.2 for cloudpickle compatibility',
)
# Windows
require_not_windows = pytest.mark.skipif(
    sys.platform == 'win32',
    reason='test should not be run on Windows',
)
def require_faiss(test_case ):
    """simple docstring"""
    try:
        import faiss  # noqa
    except ImportError:
        test_case = unittest.skip('''test requires faiss''' )(test_case )
    return test_case
def require_regex(test_case ):
    """simple docstring"""
    try:
        import regex  # noqa
    except ImportError:
        test_case = unittest.skip('''test requires regex''' )(test_case )
    return test_case
def require_elasticsearch(test_case ):
    """simple docstring"""
    try:
        import elasticsearch  # noqa
    except ImportError:
        test_case = unittest.skip('''test requires elasticsearch''' )(test_case )
    return test_case
def require_sqlalchemy(test_case ):
    """simple docstring"""
    try:
        import sqlalchemy  # noqa
    except ImportError:
        test_case = unittest.skip('''test requires sqlalchemy''' )(test_case )
    return test_case
def require_torch(test_case ):
    """simple docstring"""
    if not config.TORCH_AVAILABLE:
        test_case = unittest.skip('''test requires PyTorch''' )(test_case )
    return test_case
def require_tf(test_case ):
    """simple docstring"""
    if not config.TF_AVAILABLE:
        test_case = unittest.skip('''test requires TensorFlow''' )(test_case )
    return test_case
def require_jax(test_case ):
    """simple docstring"""
    if not config.JAX_AVAILABLE:
        test_case = unittest.skip('''test requires JAX''' )(test_case )
    return test_case
def require_pil(test_case ):
    """simple docstring"""
    if not config.PIL_AVAILABLE:
        test_case = unittest.skip('''test requires Pillow''' )(test_case )
    return test_case
def require_transformers(test_case ):
    """simple docstring"""
    try:
        import transformers  # noqa F401
    except ImportError:
        return unittest.skip('''test requires transformers''' )(test_case )
    else:
        return test_case
def require_tiktoken(test_case ):
    """simple docstring"""
    try:
        import tiktoken  # noqa F401
    except ImportError:
        return unittest.skip('''test requires tiktoken''' )(test_case )
    else:
        return test_case
def require_spacy(test_case ):
    """simple docstring"""
    try:
        import spacy  # noqa F401
    except ImportError:
        return unittest.skip('''test requires spacy''' )(test_case )
    else:
        return test_case
def require_spacy_model(model ):
    """simple docstring"""
    def _require_spacy_model(test_case ):
        try:
            import spacy  # noqa F401
            spacy.load(model )
        except ImportError:
            return unittest.skip('''test requires spacy''' )(test_case )
        except OSError:
            return unittest.skip('''test requires spacy model \'{}\''''.format(model ) )(test_case )
        else:
            return test_case
    return _require_spacy_model
def require_pyspark(test_case ):
    """simple docstring"""
    try:
        import pyspark  # noqa F401
    except ImportError:
        return unittest.skip('''test requires pyspark''' )(test_case )
    else:
        return test_case
def require_joblibspark(test_case ):
    """simple docstring"""
    try:
        import joblibspark  # noqa F401
    except ImportError:
        return unittest.skip('''test requires joblibspark''' )(test_case )
    else:
        return test_case
def slow(test_case ):
    """simple docstring"""
    if not _run_slow_tests or _run_slow_tests == 0:
        test_case = unittest.skip('''test is slow''' )(test_case )
    return test_case
def local(test_case ):
    """simple docstring"""
    if not _run_local_tests or _run_local_tests == 0:
        test_case = unittest.skip('''test is local''' )(test_case )
    return test_case
def packaged(test_case ):
    """simple docstring"""
    if not _run_packaged_tests or _run_packaged_tests == 0:
        test_case = unittest.skip('''test is packaged''' )(test_case )
    return test_case
def remote(test_case ):
    """simple docstring"""
    if not _run_remote_tests or _run_remote_tests == 0:
        test_case = unittest.skip('''test requires remote''' )(test_case )
    return test_case
def for_all_test_methods(*decorators ):
    """simple docstring"""
    def decorate(cls ):
        for name, fn in cls.__dict__.items():
            if callable(fn ) and name.startswith('''test''' ):
                for decorator in decorators:
                    fn = decorator(fn )
                setattr(cls ,name ,fn )
        return cls
    return decorate
class RequestWouldHangIndefinitelyError( Exception ):
    """simple docstring"""
    pass
class OfflineSimulationMode( Enum ):
    """simple docstring"""
    CONNECTION_FAILS = 0
    CONNECTION_TIMES_OUT = 1
    HF_DATASETS_OFFLINE_SET_TO_1 = 2
@contextmanager
def offline(mode=OfflineSimulationMode.CONNECTION_FAILS ,timeout=1e-16 ):
    """simple docstring"""
    online_request = requests.Session().request
    def timeout_request(session ,method ,url ,**kwargs ):
        # Change the url to an invalid url so that the connection hangs
        invalid_url = '''https://10.255.255.1'''
        if kwargs.get('''timeout''' ) is None:
            raise RequestWouldHangIndefinitelyError(
                f'Tried a call to {url} in offline mode with no timeout set. Please set a timeout.' )
        kwargs['''timeout'''] = timeout
        try:
            return online_request(method ,invalid_url ,**kwargs )
        except Exception as e:
            # The following changes in the error are just here to make the offline timeout error prettier
            e.request.url = url
            max_retry_error = e.args[0]
            max_retry_error.args = (max_retry_error.args[0].replace('''10.255.255.1''' ,f'OfflineMock[{url}]' ),)
            e.args = (max_retry_error,)
            raise
    def raise_connection_error(session ,prepared_request ,**kwargs ):
        raise requests.ConnectionError('''Offline mode is enabled.''' ,request=prepared_request )
    if mode is OfflineSimulationMode.CONNECTION_FAILS:
        with patch('''requests.Session.send''' ,raise_connection_error ):
            yield
    elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
        # inspired from https://stackoverflow.com/a/904609
        with patch('''requests.Session.request''' ,timeout_request ):
            yield
    elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
        with patch('''datasets.config.HF_DATASETS_OFFLINE''' ,True ):
            yield
    else:
        raise ValueError('''Please use a value from the OfflineSimulationMode enum.''' )
@contextmanager
def set_current_working_directory_to_temp_dir(*args ,**kwargs ):
    """simple docstring"""
    original_working_dir = str(Path().resolve() )
    with tempfile.TemporaryDirectory(*args ,**kwargs ) as tmp_dir:
        try:
            os.chdir(tmp_dir )
            yield
        finally:
            os.chdir(original_working_dir )
@contextmanager
def assert_arrow_memory_increases():
    """simple docstring"""
    import gc
    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."
@contextmanager
def assert_arrow_memory_doesnt_increase():
    """simple docstring"""
    import gc
    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."
def is_rng_equal(rng1 ,rng2 ):
    """simple docstring"""
    return deepcopy(rng1 ).integers(0 ,100 ,10 ).tolist() == deepcopy(rng2 ).integers(0 ,100 ,10 ).tolist()
def xfail_if_500_502_http_error(func ):
    """simple docstring"""
    import decorator
    from requests.exceptions import HTTPError
    def _wrapper(func ,*args ,**kwargs ):
        try:
            return func(*args ,**kwargs )
        except HTTPError as err:
            if str(err ).startswith('''500''' ) or str(err ).startswith('''502''' ):
                pytest.xfail(str(err ) )
            raise err
    return decorator.decorator(_wrapper ,func )
class _RunOutput:
    """simple docstring"""
    def __init__( self , returncode , stdout , stderr) -> None:
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr
async def _read_stream(stream ,callback ):
    """simple docstring"""
    while True:
        line = await stream.readline()
        if line:
            callback(line )
        else:
            break
async def _stream_subprocess(cmd ,env=None ,stdin=None ,timeout=None ,quiet=False ,echo=False ):
    """simple docstring"""
    if echo:
        print('''\nRunning: ''' ,''' '''.join(cmd ) )
    p = await asyncio.create_subprocess_exec(
        cmd[0] ,*cmd[1:] ,stdin=stdin ,stdout=asyncio.subprocess.PIPE ,stderr=asyncio.subprocess.PIPE ,env=env ,)
    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)
    out = []
    err = []
    def tee(line ,sink ,pipe ,label="" ):
        line = line.decode('''utf-8''' ).rstrip()
        sink.append(line )
        if not quiet:
            print(label ,line ,file=pipe )
    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            _read_stream(p.stdout ,lambda line : tee(line ,out ,sys.stdout ,label='''stdout:''' ) ),
            _read_stream(p.stderr ,lambda line : tee(line ,err ,sys.stderr ,label='''stderr:''' ) ),
        ] ,timeout=timeout ,)
    return _RunOutput(await p.wait() ,out ,err )
def execute_subprocess_async(cmd ,env=None ,stdin=None ,timeout=180 ,quiet=False ,echo=True ):
    """simple docstring"""
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd ,env=env ,stdin=stdin ,timeout=timeout ,quiet=quiet ,echo=echo ) )
    cmd_str = ''' '''.join(cmd )
    if result.returncode > 0:
        stderr = '''\n'''.join(result.stderr )
        raise RuntimeError(
            f'\'{cmd_str}\' failed with returncode {result.returncode}\n\n'
            f'The combined stderr from workers follows:\n{stderr}' )
    # check that the subprocess actually did run and produced some output, should the test rely on
    # the remote side to do the testing
    if not result.stdout and not result.stderr:
        raise RuntimeError(f'\'{cmd_str}\' produced no output.' )
    return result
def pytest_xdist_worker_id():
    """simple docstring"""
    worker = os.environ.get('''PYTEST_XDIST_WORKER''' ,'''gw0''' )
    worker = re.sub(r'''^gw''' ,'''''' ,worker ,0 ,re.M )
    return int(worker )
def get_torch_dist_unique_port():
    """simple docstring"""
    port = 2_9500
    uniq_delta = pytest_xdist_worker_id()
    return port + uniq_delta | 34 | 0 |
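A hedged usage sketch of the helpers above — the `offline()` simulator and a `require_*` decorator — as they would appear in a consuming test module (the test names are illustrative, not part of the library):

import pytest
import requests

@require_torch  # skipped unless PyTorch is installed
def test_tensor_roundtrip():
    import torch
    assert torch.tensor([1]).item() == 1

def test_offline_times_out_quickly():
    # CONNECTION_TIMES_OUT reroutes requests to a non-routable IP and
    # forces the tiny `timeout`, so the call fails fast instead of hanging.
    with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT, timeout=1e-16):
        with pytest.raises(requests.exceptions.ConnectTimeout):
            requests.get("https://huggingface.co", timeout=1.0)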
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A = logging.get_logger(__name__)
A = {
'edbeeching/decision-transformer-gym-hopper-medium': (
'https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json'
),
# See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}
class DecisionTransformerConfig( PretrainedConfig):
    model_type = '''decision_transformer'''
    keys_to_ignore_at_inference = ['''past_key_values''']
    attribute_map = {
        '''max_position_embeddings''': '''n_positions''',
        '''num_attention_heads''': '''n_head''',
        '''num_hidden_layers''': '''n_layer''',
    }
    def __init__( self, state_dim=17, act_dim=4, hidden_size=128, max_ep_len=4096, action_tanh=True, vocab_size=1, n_positions=1024, n_layer=3, n_head=1, n_inner=None, activation_function="relu", resid_pdrop=0.1, embd_pdrop=0.1, attn_pdrop=0.1, layer_norm_epsilon=1e-5, initializer_range=0.02, scale_attn_weights=True, use_cache=True, bos_token_id=5_0256, eos_token_id=5_0256, scale_attn_by_inverse_layer_idx=False, reorder_and_upcast_attn=False, **kwargs, ):
        """simple docstring"""
        self.state_dim = state_dim
        self.act_dim = act_dim
        self.hidden_size = hidden_size
        self.max_ep_len = max_ep_len
        self.action_tanh = action_tanh
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs )
| 320 |
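A brief instantiation sketch for the config above, overriding only environment-specific fields and letting everything else fall back to the defaults in `__init__` (the Hopper-like dimensions here are purely illustrative):

config = DecisionTransformerConfig(state_dim=11, act_dim=3, max_ep_len=1000)
assert config.state_dim == 11
assert config.n_head == 1  # untouched default
assert config.model_type == "decision_transformer"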
"""simple docstring"""
import operator
def strand_sort(arr ,reverse = False ,solution = None ):
    """simple docstring"""
    _operator = operator.lt if reverse else operator.gt
    solution = solution or []
    if not arr:
        return solution
    sublist = [arr.pop(0 )]
    for i, item in enumerate(arr ):
        if _operator(item ,sublist[-1] ):
            sublist.append(item )
            arr.pop(i )
    # merging sublist into solution list
    if not solution:
        solution.extend(sublist )
    else:
        while sublist:
            item = sublist.pop(0 )
            for i, xx in enumerate(solution ):
                if not _operator(item ,xx ):
                    solution.insert(i ,item )
                    break
            else:
                solution.append(item )
    strand_sort(arr ,reverse ,solution )
    return solution
if __name__ == "__main__":
assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1] | 34 | 0 |
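A short worked trace of the recursion on the first assert's input, as comments:

# strand_sort([4, 3, 5, 1, 2]) proceeds in three passes:
#   pass 1 pulls the strand [4, 5]  -> solution = [4, 5]
#   pass 2 pulls the strand [3]     -> merged    [3, 4, 5]
#   pass 3 pulls the strand [1, 2]  -> merged    [1, 2, 3, 4, 5]
# Each pass removes one increasing "strand" from the remaining input and
# merges it into `solution`, so the recursion depth equals the number of
# strands: O(n) passes and O(n^2) comparisons in the worst case.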
'''simple docstring'''
import unittest
import numpy as np
from transformers import DistilBertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.distilbert.modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
)
class FlaxDistilBertModelTester( unittest.TestCase ):
    def __init__( self,parent,batch_size=1_3,seq_length=7,is_training=True,use_attention_mask=True,use_token_type_ids=True,use_labels=True,vocab_size=9_9,hidden_size=3_2,num_hidden_layers=5,num_attention_heads=4,intermediate_size=3_7,hidden_act="gelu",hidden_dropout_prob=0.1,attention_probs_dropout_prob=0.1,max_position_embeddings=5_1_2,type_vocab_size=1_6,type_sequence_label_size=2,initializer_range=0.02,num_choices=4,):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length],self.vocab_size )
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length] )
        config = DistilBertConfig(
            vocab_size=self.vocab_size,dim=self.hidden_size,n_layers=self.num_hidden_layers,n_heads=self.num_attention_heads,hidden_dim=self.intermediate_size,hidden_act=self.hidden_act,dropout=self.hidden_dropout_prob,attention_dropout=self.attention_probs_dropout_prob,max_position_embeddings=self.max_position_embeddings,initializer_range=self.initializer_range,tie_weights_=True,)
        return config, input_ids, attention_mask
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_flax
class FlaxDistilBertModelTest( FlaxModelTesterMixin , unittest.TestCase ):
    all_model_classes = (
(
FlaxDistilBertModel,
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertForQuestionAnswering,
)
if is_flax_available()
else ()
)
    def setUp( self ):
        self.model_tester = FlaxDistilBertModelTester(self )
@slow
    def test_model_from_pretrained( self ):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("distilbert-base-uncased" )
            outputs = model(np.ones((1, 1) ) )
            self.assertIsNotNone(outputs )
@require_flax
class FlaxDistilBertModelIntegrationTest( unittest.TestCase ):
    @slow
    def test_inference_no_head( self ):
        model = FlaxDistilBertModel.from_pretrained("distilbert-base-uncased" )
        input_ids = np.array([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]] )
        attention_mask = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
        output = model(input_ids,attention_mask=attention_mask )[0]
        expected_shape = (1, 1_1, 7_6_8)
        self.assertEqual(output.shape,expected_shape )
        expected_slice = np.array([[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]] )
        self.assertTrue(jnp.allclose(output[:, 1:4, 1:4],expected_slice,atol=1e-4 ) ) | 44 |
"""simple docstring"""
from scipy.stats import pearsonr
import datasets
_DESCRIPTION = '\nPearson correlation coefficient and p-value for testing non-correlation.\nThe Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.\nThe p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.\n'
_KWARGS_DESCRIPTION = '\nArgs:\n predictions (`list` of `int`): Predicted class labels, as returned by a model.\n references (`list` of `int`): Ground truth labels.\n return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.\n\nReturns:\n pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.\n p-value (`float`): P-value, which roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.\n\nExamples:\n\n Example 1-A simple example using only predictions and references.\n >>> pearsonr_metric = datasets.load_metric("pearsonr")\n >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])\n >>> print(round(results[\'pearsonr\'], 2))\n -0.74\n\n Example 2-The same as Example 1, but that also returns the `p-value`.\n >>> pearsonr_metric = datasets.load_metric("pearsonr")\n >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)\n >>> print(sorted(list(results.keys())))\n [\'p-value\', \'pearsonr\']\n >>> print(round(results[\'pearsonr\'], 2))\n -0.74\n >>> print(round(results[\'p-value\'], 2))\n 0.15\n'
_CITATION = '\n@article{2020SciPy-NMeth,\nauthor = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n Kern, Robert and Larson, Eric and Carey, C J and\n Polat, Ilhan and Feng, Yu and Moore, Eric W. and\n {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n Harris, Charles R. and Archibald, Anne M. and\n Ribeiro, Antonio H. and Pedregosa, Fabian and\n {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\ntitle = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n Computing in Python}},\njournal = {Nature Methods},\nyear = {2020},\nvolume = {17},\npages = {261--272},\nadsurl = {https://rdcu.be/b08Wh},\ndoi = {10.1038/s41592-019-0686-2},\n}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Pearsonr( datasets.Metric ):
"""simple docstring"""
    def _info( self) -> Tuple:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''float'''),
'''references''': datasets.Value('''float'''),
}) , reference_urls=['''https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html'''] , )
    def _compute( self , predictions , references , return_pvalue=False) -> Any:
        if return_pvalue:
            results = pearsonr(references , predictions)
            return {"pearsonr": results[0], "p-value": results[1]}
        else:
            return {"pearsonr": float(pearsonr(references , predictions)[0])} | 34 | 0 |
def z_function(input_str ):
    """simple docstring"""
    z_result = [0 for i in range(len(input_str ) )]
    # initialize interval's left pointer and right pointer
    left_pointer, right_pointer = 0, 0
    for i in range(1 , len(input_str ) ):
        # case when current index is inside the interval
        if i <= right_pointer:
            min_edge = min(right_pointer - i + 1 , z_result[i - left_pointer] )
            z_result[i] = min_edge
        while go_next(i , z_result , input_str ):
            z_result[i] += 1
        # if new index's result gives us more right interval,
        # we've to update left_pointer and right_pointer
        if i + z_result[i] - 1 > right_pointer:
            left_pointer, right_pointer = i, i + z_result[i] - 1
    return z_result
def go_next(i , z_result , s ):
    """simple docstring"""
    return i + z_result[i] < len(s ) and s[z_result[i]] == s[i + z_result[i]]
def find_pattern(pattern , input_str ):
    """simple docstring"""
    answer = 0
    # concatenate 'pattern' and 'input_str' and call z_function
    # with concatenated string
    z_result = z_function(pattern + input_str )
    for val in z_result:
        # if value is greater then length of the pattern string
        # that means this index is starting position of substring
        # which is equal to pattern string
        if val >= len(pattern ):
            answer += 1
    return answer
if __name__ == "__main__":
import doctest
doctest.testmod() | 623 |
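A concrete z-array for a small string, verified by hand, plus the pattern count it implies:

# For s = "aabaa": z[1] = 1 (one-char prefix "a" matches at index 1)
# and z[3] = 2 (two-char prefix "aa" matches at index 3).
assert z_function("aabaa") == [0, 1, 0, 2, 1]
# "ab" occurs exactly once in "aabaa" (at index 1):
assert find_pattern("ab", "aabaa") == 1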
"""simple docstring"""
import unittest
from dataclasses import dataclass
import pytest
from accelerate.commands.config.config_args import SageMakerConfig
from accelerate.utils import ComputeEnvironment
from accelerate.utils.launch import _convert_nargs_to_dict
@dataclass
class MockLaunchConfig( SageMakerConfig ):
    """simple docstring"""
    compute_environment = ComputeEnvironment.AMAZON_SAGEMAKER
    fp16 = True
    ec2_instance_type = '''ml.p3.2xlarge'''
    iam_role_name = '''accelerate_sagemaker_execution_role'''
    profile = '''hf-sm'''
    region = '''us-east-1'''
    num_machines = 1
    base_job_name = '''accelerate-sagemaker-1'''
    pytorch_version = '''1.6'''
    transformers_version = '''4.4'''
    training_script = '''train.py'''
    success_training_script_args = [
'''--model_name_or_path''',
'''bert''',
'''--do_train''',
'''False''',
'''--epochs''',
'''3''',
'''--learning_rate''',
'''5e-5''',
'''--max_steps''',
'''50.5''',
]
    fail_training_script_args = [
'''--model_name_or_path''',
'''bert''',
'''--do_train''',
'''--do_test''',
'''False''',
'''--do_predict''',
'''--epochs''',
'''3''',
'''--learning_rate''',
'''5e-5''',
'''--max_steps''',
'''50.5''',
]
class snake_case_ ( unittest.TestCase ):
"""simple docstring"""
    def test_args_convert( self) -> List[Any]:
        # Bare flags become True; values are parsed to the closest Python type.
        converted_args = _convert_nargs_to_dict(MockLaunchConfig.success_training_script_args)
        assert isinstance(converted_args['''model_name_or_path'''] , str)
        assert isinstance(converted_args['''do_train'''] , bool)
        assert isinstance(converted_args['''epochs'''] , int)
        assert isinstance(converted_args['''learning_rate'''] , float)
        assert isinstance(converted_args['''max_steps'''] , float)
        with pytest.raises(ValueError):
            _convert_nargs_to_dict(MockLaunchConfig.fail_training_script_args) | 34 | 0 |
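The assertions hinge on how bare flags and valued arguments are typed. A simplified re-implementation sketch of that value rule — not accelerate's actual code — for orientation:

# Sketch of the conversion rule the test encodes:
#   "--do_train" with no value following it -> True (bare flag)
#   "--do_train False" -> False; "--epochs 3" -> 3; "--max_steps 50.5" -> 50.5
#   anything else stays a str.
def _to_typed(value: str):
    if value in ("True", "False"):
        return value == "True"
    for cast in (int, float):
        try:
            return cast(value)
        except ValueError:
            continue
    return value

The failing argument list mixes bare flags with valued ones in a way that makes the flag-to-value pairing ambiguous, which is why the test expects a ValueError.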
'''simple docstring'''
from functools import lru_cache
@lru_cache
def factorial(num ):
    '''simple docstring'''
    if num < 0:
        raise ValueError("""Number should not be negative.""" )
    return 1 if num in (0, 1) else num * factorial(num - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod() | 667 |
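`lru_cache` memoizes every intermediate value, so a sequence of factorial calls amortizes to O(1) per new input; for n past Python's default recursion limit (about 1000 frames) an iterative variant — a sketch — avoids RecursionError:

def factorial_iter(num: int) -> int:
    if num < 0:
        raise ValueError("Number should not be negative.")
    result = 1
    for k in range(2, num + 1):
        result *= k
    return result

assert factorial_iter(5) == 120 == factorial(5)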
"""simple docstring"""
from typing import List
import jiwer
import jiwer.transforms as tr
from packaging import version
import datasets
from datasets.config import PY_VERSION
if PY_VERSION < version.parse('3.8'):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
SENTENCE_DELIMITER = ''
if version.parse(importlib_metadata.version('jiwer')) < version.parse('2.3.0'):
    class SentencesToListOfCharacters( tr.AbstractTransform ):
        """simple docstring"""
        def __init__( self , sentence_delimiter = " ") -> List[str]:
            self.sentence_delimiter = sentence_delimiter
        def process_string( self , s) -> Tuple:
            return list(s)
        def process_list( self , inp) -> Optional[Any]:
            chars = []
            for sent_idx, sentence in enumerate(inp):
                chars.extend(self.process_string(sentence))
                if self.sentence_delimiter is not None and self.sentence_delimiter != "" and sent_idx < len(inp) - 1:
                    chars.append(self.sentence_delimiter)
            return chars
    cer_transform = tr.Compose(
        [tr.RemoveMultipleSpaces(), tr.Strip(), SentencesToListOfCharacters(SENTENCE_DELIMITER)]
    )
else:
    cer_transform = tr.Compose(
        [
            tr.RemoveMultipleSpaces(),
            tr.Strip(),
            tr.ReduceToSingleSentence(SENTENCE_DELIMITER),
            tr.ReduceToListOfListOfChars(),
        ]
    )
_CITATION = '\\n@inproceedings{inproceedings,\nauthor = {Morris, Andrew and Maier, Viktoria and Green, Phil},\nyear = {2004},\nmonth = {01},\npages = {},\ntitle = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n'
_DESCRIPTION = '\\nCharacter error rate (CER) is a common metric of the performance of an automatic speech recognition system.\n\nCER is similar to Word Error Rate (WER), but operates on character instead of word. Please refer to docs of WER for further information.\n\nCharacter error rate can be computed as:\n\nCER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct characters,\nN is the number of characters in the reference (N=S+D+C).\n\nCER\'s output is not always a number between 0 and 1, in particular when there is a high number of insertions. This value is often associated to the percentage of characters that were incorrectly predicted. The lower the value, the better the\nperformance of the ASR system with a CER of 0 being a perfect score.\n'
_KWARGS_DESCRIPTION = '\nComputes CER score of transcribed segments against references.\nArgs:\n references: list of references for each speech input.\n predictions: list of transcriptions to score.\n concatenate_texts: Whether or not to concatenate sentences before evaluation, set to True for more accurate result.\nReturns:\n (float): the character error rate\n\nExamples:\n\n >>> predictions = ["this is the prediction", "there is an other sample"]\n >>> references = ["this is the reference", "there is another one"]\n >>> cer = datasets.load_metric("cer")\n >>> cer_score = cer.compute(predictions=predictions, references=references)\n >>> print(cer_score)\n 0.34146341463414637\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class CER( datasets.Metric ):
"""simple docstring"""
    def _info( self) -> Dict:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence'''),
'''references''': datasets.Value('''string''' , id='''sequence'''),
}) , codebase_urls=['''https://github.com/jitsi/jiwer/'''] , reference_urls=[
'''https://en.wikipedia.org/wiki/Word_error_rate''',
'''https://sites.google.com/site/textdigitisation/qualitymeasures/computingerrorrates''',
] , )
    def _compute( self , predictions , references , concatenate_texts=False) -> List[Any]:
        if concatenate_texts:
            return jiwer.compute_measures(
                references , predictions , truth_transform=cer_transform , hypothesis_transform=cer_transform , )["wer"]
        incorrect = 0
        total = 0
        for prediction, reference in zip(predictions , references):
            measures = jiwer.compute_measures(
                reference , prediction , truth_transform=cer_transform , hypothesis_transform=cer_transform , )
            incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
            total += measures["substitutions"] + measures["deletions"] + measures["hits"]
        return incorrect / total | 34 | 0 |
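The per-pair loop is an edit-distance accumulation over characters; a hand-rolled check of the formula CER = (S + D + I) / N for a single pair, using a minimal Levenshtein sketch:

def edit_distance(ref: str, hyp: str) -> int:
    # One-row Levenshtein DP; dp[j] = distance between ref[:i] and hyp[:j].
    dp = list(range(len(hyp) + 1))
    for i, r in enumerate(ref, 1):
        prev, dp[0] = dp[0], i
        for j, h in enumerate(hyp, 1):
            prev, dp[j] = dp[j], min(dp[j] + 1, dp[j - 1] + 1, prev + (r != h))
    return dp[-1]

ref, hyp = "this is the reference", "this is the prediction"
cer_one_pair = edit_distance(ref, hyp) / len(ref)  # (S + D + I) / N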
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionInpaintPipelineFastTests( PipelineLatentTesterMixin , PipelineKarrasSchedulerTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
    pipeline_class = StableDiffusionInpaintPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    image_params = frozenset(
        [] )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([] )
    def get_dummy_components( self : Dict):
'''simple docstring'''
torch.manual_seed(0)
__lowercase =UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=9 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=3_2 , attention_head_dim=(2, 4) , use_linear_projection=lowerCamelCase_ , )
__lowercase =PNDMScheduler(skip_prk_steps=lowerCamelCase_)
torch.manual_seed(0)
__lowercase =AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , sample_size=1_2_8 , )
torch.manual_seed(0)
__lowercase =CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , hidden_act='gelu' , projection_dim=5_1_2 , )
__lowercase =CLIPTextModel(lowerCamelCase_)
__lowercase =CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
__lowercase ={
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
    def get_dummy_inputs( self , device , seed=0):
        '''simple docstring'''
        image = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0 , 2 , 3 , 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert('RGB').resize((6_4, 6_4))
        mask_image = Image.fromarray(np.uint8(image + 4)).convert('RGB').resize((6_4, 6_4))
        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            'prompt': 'A painting of a squirrel eating a burger',
            'image': init_image,
            'mask_image': mask_image,
            'generator': generator,
            'num_inference_steps': 2,
            'guidance_scale': 6.0,
            'output_type': 'numpy',
        }
        return inputs
    def test_stable_diffusion_inpaint( self : List[Any]):
        '''simple docstring'''
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInpaintPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 6_4, 6_4, 3)
        expected_slice = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_inference_batch_single_identical( self : Optional[Any]):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
    def tearDown( self):
        '''simple docstring'''
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_stable_diffusion_inpaint_pipeline( self : Dict):
'''simple docstring'''
__lowercase =load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/sd2-inpaint/init_image.png')
__lowercase =load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png')
__lowercase =load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'
'/yellow_cat_sitting_on_a_park_bench.npy')
__lowercase ='stabilityai/stable-diffusion-2-inpainting'
__lowercase =StableDiffusionInpaintPipeline.from_pretrained(lowerCamelCase_ , safety_checker=lowerCamelCase_)
pipe.to(lowerCamelCase_)
pipe.set_progress_bar_config(disable=lowerCamelCase_)
pipe.enable_attention_slicing()
__lowercase ='Face of a yellow cat, high resolution, sitting on a park bench'
__lowercase =torch.manual_seed(0)
__lowercase =pipe(
prompt=lowerCamelCase_ , image=lowerCamelCase_ , mask_image=lowerCamelCase_ , generator=lowerCamelCase_ , output_type='np' , )
__lowercase =output.images[0]
assert image.shape == (5_1_2, 5_1_2, 3)
assert np.abs(expected_image - image).max() < 9e-3
    def test_stable_diffusion_inpaint_pipeline_fp16( self : str):
'''simple docstring'''
__lowercase =load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/sd2-inpaint/init_image.png')
__lowercase =load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png')
__lowercase =load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'
'/yellow_cat_sitting_on_a_park_bench_fp16.npy')
__lowercase ='stabilityai/stable-diffusion-2-inpainting'
__lowercase =StableDiffusionInpaintPipeline.from_pretrained(
        lowerCamelCase_ , torch_dtype=torch.float16 , safety_checker=lowerCamelCase_ , )
pipe.to(lowerCamelCase_)
pipe.set_progress_bar_config(disable=lowerCamelCase_)
pipe.enable_attention_slicing()
__lowercase ='Face of a yellow cat, high resolution, sitting on a park bench'
__lowercase =torch.manual_seed(0)
__lowercase =pipe(
prompt=lowerCamelCase_ , image=lowerCamelCase_ , mask_image=lowerCamelCase_ , generator=lowerCamelCase_ , output_type='np' , )
__lowercase =output.images[0]
assert image.shape == (5_1_2, 5_1_2, 3)
assert np.abs(expected_image - image).max() < 5e-1
    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading( self : int):
'''simple docstring'''
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
__lowercase =load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/sd2-inpaint/init_image.png')
__lowercase =load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png')
__lowercase ='stabilityai/stable-diffusion-2-inpainting'
__lowercase =PNDMScheduler.from_pretrained(lowerCamelCase_ , subfolder='scheduler')
__lowercase =StableDiffusionInpaintPipeline.from_pretrained(
        lowerCamelCase_ , safety_checker=lowerCamelCase_ , scheduler=lowerCamelCase_ , torch_dtype=torch.float16 , )
pipe.to(lowerCamelCase_)
pipe.set_progress_bar_config(disable=lowerCamelCase_)
pipe.enable_attention_slicing(1)
pipe.enable_sequential_cpu_offload()
__lowercase ='Face of a yellow cat, high resolution, sitting on a park bench'
__lowercase =torch.manual_seed(0)
__lowercase =pipe(
prompt=lowerCamelCase_ , image=lowerCamelCase_ , mask_image=lowerCamelCase_ , generator=lowerCamelCase_ , num_inference_steps=2 , output_type='np' , )
__lowercase =torch.cuda.max_memory_allocated()
# make sure that less than 2.65 GB is allocated
assert mem_bytes < 2.65 * 1_0**9
| 474 |
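Distilled from the last test above: the memory knobs compose, trading speed for peak VRAM. A minimal hedged usage sketch:

import torch
from diffusers import StableDiffusionInpaintPipeline

pipe = StableDiffusionInpaintPipeline.from_pretrained(
    "stabilityai/stable-diffusion-2-inpainting", torch_dtype=torch.float16
)
pipe.enable_attention_slicing(1)      # compute attention in small slices
pipe.enable_sequential_cpu_offload()  # stream weights to the GPU on demand
# Note: offloading installs its own device hooks, so skip pipe.to("cuda").
# The test above measured under ~2.65 GB of peak GPU memory with this setup.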
"""simple docstring"""
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ = {'vocab_file': 'spiece.model'}
SCREAMING_SNAKE_CASE_ = {
'vocab_file': {
'xlnet-base-cased': 'https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model',
'xlnet-large-cased': 'https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model',
}
}
SCREAMING_SNAKE_CASE_ = {
'xlnet-base-cased': None,
'xlnet-large-cased': None,
}
# Segments (not really needed)
SCREAMING_SNAKE_CASE_ = 0
SCREAMING_SNAKE_CASE_ = 1
SCREAMING_SNAKE_CASE_ = 2
SCREAMING_SNAKE_CASE_ = 3
SCREAMING_SNAKE_CASE_ = 4
class XLNetTokenizer( PreTrainedTokenizer ):
    """simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = '''left'''
    def __init__( self , vocab_file , do_lower_case=False , remove_space=True , keep_accents=False , bos_token="<s>" , eos_token="</s>" , unk_token="<unk>" , sep_token="<sep>" , pad_token="<pad>" , cls_token="<cls>" , mask_token="<mask>" , additional_special_tokens=["<eop>", "<eod>"] , sp_model_kwargs = None , **kwargs , ) -> None:
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False) if isinstance(mask_token , str) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            do_lower_case=do_lower_case , remove_space=remove_space , keep_accents=keep_accents , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , additional_special_tokens=additional_special_tokens , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
@property
    def vocab_size( self) -> int:
return len(self.sp_model)
    def get_vocab( self) -> Tuple:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def __getstate__( self) -> Any:
        state = self.__dict__.copy()
        state['''sp_model'''] = None
        return state
    def __setstate__( self , d) -> None:
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , '''sp_model_kwargs'''):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def preprocess_text( self , inputs) -> Union[str, Any]:
        if self.remove_space:
            outputs = ''' '''.join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace('''``''' , '''"''').replace('''\'\'''' , '''"''')
        if not self.keep_accents:
            outputs = unicodedata.normalize('''NFKD''' , outputs)
            outputs = ''''''.join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()
        return outputs
    def _tokenize( self , text) -> List[str]:
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text , out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == str(''',''') and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE , ''''''))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)
        return new_pieces
    def _convert_token_to_id( self , token) -> int:
        return self.sp_model.PieceToId(token)
    def _convert_id_to_token( self , index) -> Optional[int]:
        return self.sp_model.IdToPiece(index)
    def convert_tokens_to_string( self , tokens) -> Dict:
        out_string = ''''''.join(tokens).replace(SPIECE_UNDERLINE , ''' ''').strip()
        return out_string
    def _decode( self , token_ids , skip_special_tokens = False , clean_up_tokenization_spaces = None , spaces_between_special_tokens = True , **kwargs , ) -> str:
        self._decode_use_source_tokenizer = kwargs.pop('''use_source_tokenizer''' , False)
        filtered_tokens = self.convert_ids_to_tokens(token_ids , skip_special_tokens=skip_special_tokens)
        # To avoid mixing byte-level and unicode for byte-level BPT
        # we need to build string separately for added tokens and byte-level tokens
        # cf. https://github.com/huggingface/transformers/issues/1133
        sub_texts = []
        current_sub_text = []
        for token in filtered_tokens:
            if skip_special_tokens and token in self.all_special_ids:
                continue
            if token in self.added_tokens_encoder:
                if current_sub_text:
                    sub_texts.append(self.convert_tokens_to_string(current_sub_text))
                    current_sub_text = []
                sub_texts.append(token)
            else:
                current_sub_text.append(token)
        if current_sub_text:
            sub_texts.append(self.convert_tokens_to_string(current_sub_text))
        # Mimic the behavior of the Rust tokenizer:
        # By default, there are no spaces between special tokens
        text = ''''''.join(sub_texts)
        clean_up_tokenization_spaces = (
            clean_up_tokenization_spaces
            if clean_up_tokenization_spaces is not None
            else self.clean_up_tokenization_spaces
        )
        if clean_up_tokenization_spaces:
            clean_text = self.clean_up_tokenization(text)
            return clean_text
        else:
            return text
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1 = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls
    def get_special_tokens_mask( self , token_ids_0 , token_ids_1 = None , already_has_special_tokens = False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=already_has_special_tokens)
        if token_ids_1 is not None:
            return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1, 1]
        return ([0] * len(token_ids_0)) + [1, 1]
    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None) -> List[int]:
        sep = [self.sep_token_id]
        cls_segment_id = [2]
        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ = None) -> Tuple[str]:
if not os.path.isdir(lowerCamelCase_):
logger.error(F'Vocabulary path ({save_directory}) should be a directory')
return
UpperCamelCase = os.path.join(
lowerCamelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''])
if os.path.abspath(self.vocab_file) != os.path.abspath(lowerCamelCase_) and os.path.isfile(self.vocab_file):
copyfile(self.vocab_file , lowerCamelCase_)
elif not os.path.isfile(self.vocab_file):
with open(lowerCamelCase_ , '''wb''') as fi:
UpperCamelCase = self.sp_model.serialized_model_proto()
fi.write(lowerCamelCase_)
return (out_vocab_file,) | 34 | 0 |
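
# A minimal sketch (not part of the tokenizer above) of the digit/comma rule its
# _tokenize method applies: a SentencePiece piece ending in "<digit>," gets the
# comma split off and the digit part re-encoded. The toy `reencode` below is an
# assumption standing in for sp_model.EncodeAsPieces.
SPIECE_UNDERLINE = "\u2581"

def split_digit_comma(piece, reencode):
    if len(piece) > 1 and piece[-1] == "," and piece[-2].isdigit():
        cur = reencode(piece[:-1].replace(SPIECE_UNDERLINE, ""))
        # drop the metaspace the re-encoder adds when the original piece had none
        if piece[0] != SPIECE_UNDERLINE and cur[0][0] == SPIECE_UNDERLINE:
            cur = cur[1:] if len(cur[0]) == 1 else [cur[0][1:]] + cur[1:]
        return cur + [piece[-1]]
    return [piece]

toy_reencode = lambda s: [SPIECE_UNDERLINE + s]  # hypothetical stand-in
print(split_digit_comma("9,", toy_reencode))  # ['9', ',']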
import json
import os
import unittest
from transformers import AutoTokenizer, GPTaTokenizer, GPTaTokenizerFast
from transformers.models.gpta.tokenization_gpta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class SCREAMING_SNAKE_CASE__ ( lowerCamelCase_ , unittest.TestCase ):
'''simple docstring'''
__lowerCamelCase : str = GPTaTokenizer
__lowerCamelCase : List[str] = GPTaTokenizerFast
__lowerCamelCase : List[Any] = True
__lowerCamelCase : Tuple = {"add_prefix_space": True}
__lowerCamelCase : str = False
def _lowerCAmelCase ( self ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
A : str = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
"""<|endoftext|>""",
]
A : Any = dict(zip(lowerCamelCase_, range(len(lowerCamelCase_ ) ) ) )
A : List[Any] = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
A : List[Any] = {"""unk_token""": """<unk>"""}
A : int = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["""vocab_file"""] )
A : Union[str, Any] = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file, """w""", encoding="""utf-8""" ) as fp:
fp.write(json.dumps(lowerCamelCase_ ) + """\n""" )
with open(self.merges_file, """w""", encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(lowerCamelCase_ ) )
def _lowerCAmelCase ( self, **lowerCamelCase__ ):
kwargs.update(self.special_tokens_map )
return GPTaTokenizer.from_pretrained(self.tmpdirname, **lowerCamelCase_ )
def _lowerCAmelCase ( self, **lowerCamelCase__ ):
kwargs.update(self.special_tokens_map )
return GPTaTokenizerFast.from_pretrained(self.tmpdirname, **lowerCamelCase_ )
def _lowerCAmelCase ( self, lowerCamelCase__ ):
A : Tuple = """lower newer"""
A : str = """lower newer"""
return input_text, output_text
def _lowerCAmelCase ( self ):
A : Dict = GPTaTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map )
A : Optional[int] = """lower newer"""
A : Optional[int] = ["""\u0120low""", """er""", """\u0120""", """n""", """e""", """w""", """er"""]
A : Dict = tokenizer.tokenize(lowerCamelCase_, add_prefix_space=lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_, lowerCamelCase_ )
A : Dict = tokens + [tokenizer.unk_token]
A : Any = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCamelCase_ ), lowerCamelCase_ )
def _lowerCAmelCase ( self ):
if not self.test_rust_tokenizer:
return
A : List[Any] = self.get_tokenizer()
A : int = self.get_rust_tokenizer(add_prefix_space=lowerCamelCase_ )
A : List[str] = """lower newer"""
# Testing tokenization
A : Union[str, Any] = tokenizer.tokenize(lowerCamelCase_, add_prefix_space=lowerCamelCase_ )
A : int = rust_tokenizer.tokenize(lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_, lowerCamelCase_ )
# Testing conversion to ids without special tokens
A : str = tokenizer.encode(lowerCamelCase_, add_special_tokens=lowerCamelCase_, add_prefix_space=lowerCamelCase_ )
A : Any = rust_tokenizer.encode(lowerCamelCase_, add_special_tokens=lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_, lowerCamelCase_ )
# Testing conversion to ids with special tokens
A : Union[str, Any] = self.get_rust_tokenizer(add_prefix_space=lowerCamelCase_ )
A : Any = tokenizer.encode(lowerCamelCase_, add_prefix_space=lowerCamelCase_ )
A : Optional[Any] = rust_tokenizer.encode(lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_, lowerCamelCase_ )
# Testing the unknown token
A : Optional[Any] = tokens + [rust_tokenizer.unk_token]
A : int = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(lowerCamelCase_ ), lowerCamelCase_ )
def _lowerCAmelCase ( self, *lowerCamelCase__, **lowerCamelCase__ ):
# It's very difficult to mix/test pretokenization with byte-level
# And get both GPT2 and Roberta to work at the same time (mostly an issue of adding a space before the string)
pass
def _lowerCAmelCase ( self, lowerCamelCase__=15 ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
A : Tuple = self.rust_tokenizer_class.from_pretrained(lowerCamelCase_, **lowerCamelCase_ )
# Simple input
A : Optional[Any] = """This is a simple input"""
A : List[Any] = ["""This is a simple input 1""", """This is a simple input 2"""]
A : List[Any] = ("""This is a simple input""", """This is a pair""")
A : Optional[int] = [
("""This is a simple input 1""", """This is a simple input 2"""),
("""This is a simple pair 1""", """This is a simple pair 2"""),
]
# Simple input tests
self.assertRaises(lowerCamelCase_, tokenizer_r.encode, lowerCamelCase_, max_length=lowerCamelCase_, padding="""max_length""" )
# Simple input
self.assertRaises(lowerCamelCase_, tokenizer_r.encode_plus, lowerCamelCase_, max_length=lowerCamelCase_, padding="""max_length""" )
# Simple input
self.assertRaises(
lowerCamelCase_, tokenizer_r.batch_encode_plus, lowerCamelCase_, max_length=lowerCamelCase_, padding="""max_length""", )
# Pair input
self.assertRaises(lowerCamelCase_, tokenizer_r.encode, lowerCamelCase_, max_length=lowerCamelCase_, padding="""max_length""" )
# Pair input
self.assertRaises(lowerCamelCase_, tokenizer_r.encode_plus, lowerCamelCase_, max_length=lowerCamelCase_, padding="""max_length""" )
# Pair input
self.assertRaises(
lowerCamelCase_, tokenizer_r.batch_encode_plus, lowerCamelCase_, max_length=lowerCamelCase_, padding="""max_length""", )
def _lowerCAmelCase ( self ):
A : int = GPTaTokenizer.from_pretrained(self.tmpdirname, pad_token="""<pad>""" )
# Simple input
A : Optional[Any] = """This is a simple input"""
A : Optional[Any] = ["""This is a simple input looooooooong""", """This is a simple input"""]
A : Optional[int] = ("""This is a simple input""", """This is a pair""")
A : List[str] = [
("""This is a simple input loooooong""", """This is a simple input"""),
("""This is a simple pair loooooong""", """This is a simple pair"""),
]
A : int = tokenizer.pad_token_id
A : int = tokenizer(lowerCamelCase_, padding="""max_length""", max_length=30, return_tensors="""np""" )
A : int = tokenizer(lowerCamelCase_, padding=lowerCamelCase_, truncate=lowerCamelCase_, return_tensors="""np""" )
A : List[str] = tokenizer(*lowerCamelCase_, padding="""max_length""", max_length=60, return_tensors="""np""" )
A : List[Any] = tokenizer(lowerCamelCase_, padding=lowerCamelCase_, truncate=lowerCamelCase_, return_tensors="""np""" )
# s
# test single string max_length padding
self.assertEqual(out_s["""input_ids"""].shape[-1], 30 )
self.assertTrue(pad_token_id in out_s["""input_ids"""] )
self.assertTrue(0 in out_s["""attention_mask"""] )
# s2
# test automatic padding
self.assertEqual(out_sa["""input_ids"""].shape[-1], 33 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa["""input_ids"""][0] )
self.assertFalse(0 in out_sa["""attention_mask"""][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa["""input_ids"""][1] )
self.assertTrue(0 in out_sa["""attention_mask"""][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p["""input_ids"""].shape[-1], 60 )
self.assertTrue(pad_token_id in out_p["""input_ids"""] )
self.assertTrue(0 in out_p["""attention_mask"""] )
# p2
# test automatic padding pair
self.assertEqual(out_pa["""input_ids"""].shape[-1], 52 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa["""input_ids"""][0] )
self.assertFalse(0 in out_pa["""attention_mask"""][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa["""input_ids"""][1] )
self.assertTrue(0 in out_pa["""attention_mask"""][1] )
def _lowerCAmelCase ( self ):
A : Optional[int] = """$$$"""
A : Any = GPTaTokenizer.from_pretrained(self.tmpdirname, bos_token=lowerCamelCase_, add_bos_token=lowerCamelCase_ )
A : Any = """This is a simple input"""
A : List[str] = ["""This is a simple input 1""", """This is a simple input 2"""]
A : Optional[Any] = tokenizer.bos_token_id
A : str = tokenizer(lowerCamelCase_ )
A : Tuple = tokenizer(lowerCamelCase_ )
self.assertEqual(out_s.input_ids[0], lowerCamelCase_ )
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
A : List[Any] = tokenizer.decode(out_s.input_ids )
A : Optional[int] = tokenizer.batch_decode(out_sa.input_ids )
self.assertEqual(decode_s.split()[0], lowerCamelCase_ )
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
def _lowerCAmelCase ( self ):
pass
def _lowerCAmelCase ( self ):
# TODO: change to self.get_tokenizers() when the fast version is implemented
A : Dict = [self.get_tokenizer(do_lower_case=lowerCamelCase_, add_bos_token=lowerCamelCase_ )]
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
A : str = """Encode this."""
A : Dict = """This one too please."""
A : Union[str, Any] = tokenizer.encode(lowerCamelCase_, add_special_tokens=lowerCamelCase_ )
encoded_sequence += tokenizer.encode(lowerCamelCase_, add_special_tokens=lowerCamelCase_ )
A : int = tokenizer.encode_plus(
lowerCamelCase_, lowerCamelCase_, add_special_tokens=lowerCamelCase_, return_special_tokens_mask=lowerCamelCase_, )
A : Optional[int] = encoded_sequence_dict["""input_ids"""]
A : Optional[Any] = encoded_sequence_dict["""special_tokens_mask"""]
self.assertEqual(len(lowerCamelCase_ ), len(lowerCamelCase_ ) )
A : Dict = [
(x if not special_tokens_mask[i] else None) for i, x in enumerate(lowerCamelCase_ )
]
A : Union[str, Any] = [x for x in filtered_sequence if x is not None]
self.assertEqual(lowerCamelCase_, lowerCamelCase_ )
@require_tokenizers
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
def _lowerCAmelCase ( self ):
# More context:
# https://huggingface.co/wjmcat/opt-350m-paddle/discussions/1
# https://huggingface.slack.com/archives/C01N44FJDHT/p1653511495183519
# https://github.com/huggingface/transformers/pull/17088#discussion_r871246439
A : str = AutoTokenizer.from_pretrained("""facebook/opt-350m""", from_slow=lowerCamelCase_ )
A : Tuple = """A photo of a cat"""
A : int = tokenizer.encode(
lowerCamelCase_, )
self.assertEqual(lowerCamelCase_, [2, 250, 1345, 9, 10, 4758] )
tokenizer.save_pretrained("""test_opt""" )
A : List[Any] = AutoTokenizer.from_pretrained("""./test_opt""" )
A : int = tokenizer.encode(
lowerCamelCase_, )
self.assertEqual(lowerCamelCase_, [2, 250, 1345, 9, 10, 4758] )
def _lowerCAmelCase ( self ):
A : Optional[Any] = AutoTokenizer.from_pretrained("""facebook/opt-350m""", use_slow=lowerCamelCase_ )
A : Tuple = """A photo of a cat"""
A : str = tokenizer.encode(
lowerCamelCase_, )
# Same as above
self.assertEqual(lowerCamelCase_, [2, 250, 1345, 9, 10, 4758] )
@unittest.skip("""This test is failing because of a bug in the fast tokenizer""" )
def _lowerCAmelCase ( self ):
A : Dict = AutoTokenizer.from_pretrained("""facebook/opt-350m""", from_slow=lowerCamelCase_ )
A : Union[str, Any] = """bos"""
A : Union[str, Any] = tokenizer.get_vocab()["""bos"""]
A : Optional[int] = """A photo of a cat"""
A : int = tokenizer.encode(
lowerCamelCase_, )
# We changed the bos token
self.assertEqual(lowerCamelCase_, [3_1957, 250, 1345, 9, 10, 4758] )
tokenizer.save_pretrained("""./tok""" )
A : List[str] = AutoTokenizer.from_pretrained("""./tok""" )
self.assertTrue(tokenizer.is_fast )
A : Optional[Any] = tokenizer.encode(
lowerCamelCase_, )
self.assertEqual(lowerCamelCase_, [3_1957, 250, 1345, 9, 10, 4758] )
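
# A minimal sketch (not part of the test above) of the BPE merging that the toy
# vocab/merges fixture encodes. Real BPE repeatedly applies the lowest-rank
# applicable merge; for this small merge table, applying merges in file order
# gives the same segmentation. "\u0120" is GPT-2's byte-level marker for a
# leading space.
def apply_merges(word, merges):
    symbols = list(word)
    for a, b in merges:
        i = 0
        while i < len(symbols) - 1:
            if symbols[i] == a and symbols[i + 1] == b:
                symbols[i : i + 2] = [a + b]
            else:
                i += 1
    return symbols

merges = [m.split() for m in ["\u0120 l", "\u0120l o", "\u0120lo w", "e r"]]
print(apply_merges("\u0120lower", merges))  # ['\u0120low', 'er'], as the test expects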
| 662 |
"""simple docstring"""
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ = {'vocab_file': 'vocab.txt'}
SCREAMING_SNAKE_CASE_ = {
'vocab_file': {
'openbmb/cpm-ant-10b': 'https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt',
},
}
SCREAMING_SNAKE_CASE_ = {
'openbmb/cpm-ant-10b': 1024,
}
def __snake_case ( _lowercase ):
"""simple docstring"""
UpperCamelCase = collections.OrderedDict()
with open(_lowercase ,'''r''' ,encoding='''utf-8''' ) as reader:
UpperCamelCase = reader.readlines()
for index, token in enumerate(_lowercase ):
UpperCamelCase = token.rstrip('''\n''' )
UpperCamelCase = index
return vocab
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
def __init__( self , lowerCamelCase_ , lowerCamelCase_="<unk>" , lowerCamelCase_=2_0_0) -> Any:
UpperCamelCase = vocab
UpperCamelCase = unk_token
UpperCamelCase = max_input_chars_per_word
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Union[str, Any]:
UpperCamelCase = list(lowerCamelCase_)
if len(lowerCamelCase_) > self.max_input_chars_per_word:
return [self.unk_token]
UpperCamelCase = 0
UpperCamelCase = []
while start < len(lowerCamelCase_):
UpperCamelCase = len(lowerCamelCase_)
UpperCamelCase = None
while start < end:
UpperCamelCase = ''''''.join(chars[start:end])
if substr in self.vocab:
UpperCamelCase = substr
break
end -= 1
if cur_substr is None:
sub_tokens.append(self.unk_token)
start += 1
else:
sub_tokens.append(lowerCamelCase_)
UpperCamelCase = end
return sub_tokens
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
A_ = VOCAB_FILES_NAMES
A_ = PRETRAINED_VOCAB_FILES_MAP
A_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A_ = ['''input_ids''', '''attention_mask''']
A_ = False
def __init__( self , lowerCamelCase_ , lowerCamelCase_="<d>" , lowerCamelCase_="</d>" , lowerCamelCase_="<s>" , lowerCamelCase_="</s>" , lowerCamelCase_="<pad>" , lowerCamelCase_="<unk>" , lowerCamelCase_="</n>" , lowerCamelCase_="</_>" , lowerCamelCase_="left" , **lowerCamelCase_ , ) -> List[str]:
requires_backends(self , ['''jieba'''])
super().__init__(
bod_token=lowerCamelCase_ , eod_token=lowerCamelCase_ , bos_token=lowerCamelCase_ , eos_token=lowerCamelCase_ , pad_token=lowerCamelCase_ , unk_token=lowerCamelCase_ , line_token=lowerCamelCase_ , space_token=lowerCamelCase_ , padding_side=lowerCamelCase_ , **lowerCamelCase_ , )
UpperCamelCase = bod_token
UpperCamelCase = eod_token
UpperCamelCase = load_vocab(lowerCamelCase_)
UpperCamelCase = self.encoder[space_token]
UpperCamelCase = self.encoder[line_token]
del self.encoder[space_token]
del self.encoder[line_token]
UpperCamelCase = collections.OrderedDict(sorted(self.encoder.items() , key=lambda lowerCamelCase_: x[1]))
UpperCamelCase = {v: k for k, v in self.encoder.items()}
UpperCamelCase = WordpieceTokenizer(vocab=self.encoder , unk_token=self.unk_token)
@property
def UpperCAmelCase__ ( self) -> Dict:
return self.encoder[self.bod_token]
@property
def UpperCAmelCase__ ( self) -> str:
return self.encoder[self.eod_token]
@property
def UpperCAmelCase__ ( self) -> List[Any]:
return self.encoder["\n"]
@property
def UpperCAmelCase__ ( self) -> int:
return len(self.encoder)
def UpperCAmelCase__ ( self) -> Dict:
return dict(self.encoder , **self.added_tokens_encoder)
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Any:
UpperCamelCase = []
for x in jieba.cut(lowerCamelCase_ , cut_all=lowerCamelCase_):
output_tokens.extend(self.wordpiece_tokenizer.tokenize(lowerCamelCase_))
return output_tokens
def UpperCAmelCase__ ( self , lowerCamelCase_ , **lowerCamelCase_) -> Tuple:
UpperCamelCase = [i for i in token_ids if i >= 0]
UpperCamelCase = [
x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
]
return super()._decode(lowerCamelCase_ , **lowerCamelCase_)
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Dict:
return token in self.encoder
def UpperCAmelCase__ ( self , lowerCamelCase_) -> str:
return "".join(lowerCamelCase_)
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Optional[int]:
return self.encoder.get(lowerCamelCase_ , self.encoder.get(self.unk_token))
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Dict:
return self.decoder.get(lowerCamelCase_ , self.unk_token)
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ = None) -> Tuple[str]:
if os.path.isdir(lowerCamelCase_):
UpperCamelCase = os.path.join(
lowerCamelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''])
else:
UpperCamelCase = (filename_prefix + '''-''' if filename_prefix else '''''') + save_directory
UpperCamelCase = 0
if " " in self.encoder:
UpperCamelCase = self.encoder[''' ''']
del self.encoder[" "]
if "\n" in self.encoder:
UpperCamelCase = self.encoder['''\n''']
del self.encoder["\n"]
UpperCamelCase = collections.OrderedDict(sorted(self.encoder.items() , key=lambda lowerCamelCase_: x[1]))
with open(lowerCamelCase_ , '''w''' , encoding='''utf-8''') as writer:
for token, token_index in self.encoder.items():
if index != token_index:
logger.warning(
F'Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.'
''' Please check that the vocabulary is not corrupted!''')
UpperCamelCase = token_index
writer.write(token + '''\n''')
index += 1
return (vocab_file,)
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ = None) -> List[int]:
if token_ids_a is None:
return [self.bos_token_id] + token_ids_a
return [self.bos_token_id] + token_ids_a + [self.bos_token_id] + token_ids_a
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ = None , lowerCamelCase_ = False) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCamelCase_ , token_ids_a=lowerCamelCase_ , already_has_special_tokens=lowerCamelCase_)
if token_ids_a is not None:
return [1] + ([0] * len(lowerCamelCase_)) + [1] + ([0] * len(lowerCamelCase_))
return [1] + ([0] * len(lowerCamelCase_)) | 34 | 0 |
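
# A standalone sketch (not from the snippet) of the greedy longest-match loop the
# WordpieceTokenizer above implements: at each position take the longest vocab
# entry that matches, otherwise emit the unknown token and advance one character.
# The toy vocab is an assumption for illustration.
def longest_match_tokenize(text, vocab, unk="<unk>"):
    out, start = [], 0
    while start < len(text):
        end = len(text)
        while end > start and text[start:end] not in vocab:
            end -= 1
        if end == start:
            out.append(unk)
            start += 1
        else:
            out.append(text[start:end])
            start = end
    return out

print(longest_match_tokenize("unhappiness", {"un", "happiness", "happy"}))
# -> ['un', 'happiness']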
import math
from collections.abc import Iterator
from itertools import takewhile
def _snake_case (__lowercase):
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
    # All prime numbers greater than 3 are of the form 6k +/- 1
for i in range(5 , int(math.sqrt(_lowercase) + 1) , 6):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def _snake_case ():
UpperCamelCase_ = 2
while True:
if is_prime(_lowercase):
yield num
num += 1
def _snake_case (__lowercase = 2000000):
return sum(takewhile(lambda __lowercase: x < n , prime_generator()))
if __name__ == "__main__":
print(f'{solution() = }')
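
# A sieve-based cross-check (not part of the snippet above) for the same result:
# marking composites once is O(n log log n), versus trial division per candidate
# in the generator-based version.
def sum_primes_below(n):
    if n < 2:
        return 0
    sieve = bytearray([1]) * n
    sieve[0:2] = b"\x00\x00"
    for i in range(2, int(n ** 0.5) + 1):
        if sieve[i]:
            sieve[i * i :: i] = bytearray(len(range(i * i, n, i)))
    return sum(i for i in range(n) if sieve[i])

assert sum_primes_below(10) == 17  # 2 + 3 + 5 + 7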
| 23 |
"""simple docstring"""
from typing import Callable, Dict, Optional, Tuple
import torch
from torch import nn
from torch.distributions import (
AffineTransform,
Distribution,
Independent,
NegativeBinomial,
Normal,
StudentT,
TransformedDistribution,
)
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
def __init__( self , lowerCamelCase_ , lowerCamelCase_=None , lowerCamelCase_=None , lowerCamelCase_=0) -> int:
UpperCamelCase = 1.0 if scale is None else scale
UpperCamelCase = 0.0 if loc is None else loc
super().__init__(lowerCamelCase_ , [AffineTransform(loc=self.loc , scale=self.scale , event_dim=lowerCamelCase_)])
@property
def UpperCAmelCase__ ( self) -> List[Any]:
return self.base_dist.mean * self.scale + self.loc
@property
def UpperCAmelCase__ ( self) -> List[str]:
return self.base_dist.variance * self.scale**2
@property
def UpperCAmelCase__ ( self) -> Any:
return self.variance.sqrt()
class snake_case_ ( nn.Module ):
"""simple docstring"""
def __init__( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , **lowerCamelCase_) -> None:
super().__init__(**lowerCamelCase_)
UpperCamelCase = args_dim
UpperCamelCase = nn.ModuleList([nn.Linear(lowerCamelCase_ , lowerCamelCase_) for dim in args_dim.values()])
UpperCamelCase = domain_map
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Tuple[torch.Tensor]:
UpperCamelCase = [proj(lowerCamelCase_) for proj in self.proj]
return self.domain_map(*lowerCamelCase_)
class snake_case_ ( nn.Module ):
"""simple docstring"""
def __init__( self , lowerCamelCase_) -> int:
super().__init__()
UpperCamelCase = function
def UpperCAmelCase__ ( self , lowerCamelCase_ , *lowerCamelCase_) -> Tuple:
return self.function(lowerCamelCase_ , *lowerCamelCase_)
class snake_case_ :
"""simple docstring"""
A_ = 42
A_ = 42
A_ = 42
def __init__( self , lowerCamelCase_ = 1) -> None:
UpperCamelCase = dim
UpperCamelCase = {k: dim * self.args_dim[k] for k in self.args_dim}
def UpperCAmelCase__ ( self , lowerCamelCase_) -> str:
if self.dim == 1:
return self.distribution_class(*lowerCamelCase_)
else:
return Independent(self.distribution_class(*lowerCamelCase_) , 1)
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ = None , lowerCamelCase_ = None , ) -> Distribution:
UpperCamelCase = self._base_distribution(lowerCamelCase_)
if loc is None and scale is None:
return distr
else:
return AffineTransformed(lowerCamelCase_ , loc=lowerCamelCase_ , scale=lowerCamelCase_ , event_dim=self.event_dim)
@property
def UpperCAmelCase__ ( self) -> Tuple:
return () if self.dim == 1 else (self.dim,)
@property
def UpperCAmelCase__ ( self) -> int:
return len(self.event_shape)
@property
def UpperCAmelCase__ ( self) -> float:
return 0.0
def UpperCAmelCase__ ( self , lowerCamelCase_) -> nn.Module:
return ParameterProjection(
in_features=lowerCamelCase_ , args_dim=self.args_dim , domain_map=LambdaLayer(self.domain_map) , )
def UpperCAmelCase__ ( self , *lowerCamelCase_) -> List[str]:
raise NotImplementedError()
@staticmethod
def UpperCAmelCase__ ( lowerCamelCase_) -> torch.Tensor:
return (x + torch.sqrt(torch.square(lowerCamelCase_) + 4.0)) / 2.0
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
A_ = {"df": 1, "loc": 1, "scale": 1}
A_ = StudentT
@classmethod
def UpperCAmelCase__ ( cls , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> Optional[int]:
UpperCamelCase = cls.squareplus(lowerCamelCase_).clamp_min(torch.finfo(scale.dtype).eps)
UpperCamelCase = 2.0 + cls.squareplus(lowerCamelCase_)
return df.squeeze(-1), loc.squeeze(-1), scale.squeeze(-1)
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
A_ = {"loc": 1, "scale": 1}
A_ = Normal
@classmethod
def UpperCAmelCase__ ( cls , lowerCamelCase_ , lowerCamelCase_) -> str:
UpperCamelCase = cls.squareplus(lowerCamelCase_).clamp_min(torch.finfo(scale.dtype).eps)
return loc.squeeze(-1), scale.squeeze(-1)
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
A_ = {"total_count": 1, "logits": 1}
A_ = NegativeBinomial
@classmethod
def UpperCAmelCase__ ( cls , lowerCamelCase_ , lowerCamelCase_) -> List[Any]:
UpperCamelCase = cls.squareplus(lowerCamelCase_)
return total_count.squeeze(-1), logits.squeeze(-1)
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Distribution:
UpperCamelCase , UpperCamelCase = distr_args
if self.dim == 1:
return self.distribution_class(total_count=lowerCamelCase_ , logits=lowerCamelCase_)
else:
return Independent(self.distribution_class(total_count=lowerCamelCase_ , logits=lowerCamelCase_) , 1)
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ = None , lowerCamelCase_ = None) -> Distribution:
UpperCamelCase , UpperCamelCase = distr_args
if scale is not None:
# See scaling property of Gamma.
logits += scale.log()
return self._base_distribution((total_count, logits)) | 34 | 0 |
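
# A quick numeric check (not part of the snippet) of the squareplus map used
# above to keep distribution parameters positive: (x + sqrt(x^2 + 4)) / 2 is
# smooth, strictly positive, and approaches max(x, 0) as |x| grows.
import torch

x = torch.tensor([-10.0, -1.0, 0.0, 1.0, 10.0])
sp = (x + torch.sqrt(torch.square(x) + 4.0)) / 2.0
assert (sp > 0).all()
print(sp)  # first entry ~0.1, last entry ~10.1: close to relu for large |x|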
import warnings
from typing import List
from unittest.mock import Mock
import torch
from torch.utils.data import DataLoader, IterableDataset, TensorDataset
from accelerate.accelerator import Accelerator
from accelerate.utils.dataclasses import DistributedType
class lowerCamelCase__ ( lowerCamelCase_):
'''simple docstring'''
def __init__( self :Optional[int] , a :str ) -> Optional[int]:
__UpperCamelCase : List[Any] = data
def __iter__( self :Tuple ) -> Tuple:
for element in self.data:
yield element
def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : str=True) -> List[str]:
'''simple docstring'''
__UpperCamelCase : str = Accelerator(even_batches=_lowercase)
assert accelerator.num_processes == 2, "this script expects that two GPUs are available"
return accelerator
def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : int , _lowerCamelCase : Optional[Any] , _lowerCamelCase : Tuple , _lowerCamelCase : Dict = False) -> Optional[int]:
'''simple docstring'''
if iterable:
__UpperCamelCase : int = DummyIterableDataset(torch.as_tensor(range(_lowercase)))
else:
__UpperCamelCase : Optional[int] = TensorDataset(torch.as_tensor(range(_lowercase)))
__UpperCamelCase : Union[str, Any] = DataLoader(_lowercase , batch_size=_lowercase)
__UpperCamelCase : List[Any] = accelerator.prepare(_lowercase)
return dl
def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : Any , _lowerCamelCase : Optional[int] , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : int , _lowerCamelCase : Union[str, Any] , ) -> List[str]:
'''simple docstring'''
__UpperCamelCase : List[str] = create_dataloader(accelerator=_lowercase , dataset_size=_lowercase , batch_size=_lowercase)
__UpperCamelCase : Optional[Any] = [len(batch[0]) for batch in dl]
if accelerator.process_index == 0:
assert batch_sizes == process_0_expected_batch_sizes
elif accelerator.process_index == 1:
assert batch_sizes == process_1_expected_batch_sizes
def _SCREAMING_SNAKE_CASE ( ) -> Tuple:
'''simple docstring'''
__UpperCamelCase : str = create_accelerator()
# without padding, we would expect a different number of batches
verify_dataloader_batch_sizes(
_lowercase , dataset_size=3 , batch_size=1 , process_0_expected_batch_sizes=[1, 1] , process_1_expected_batch_sizes=[1, 1] , )
# without padding, we would expect the same number of batches, but different sizes
verify_dataloader_batch_sizes(
_lowercase , dataset_size=7 , batch_size=2 , process_0_expected_batch_sizes=[2, 2] , process_1_expected_batch_sizes=[2, 2] , )
def _SCREAMING_SNAKE_CASE ( ) -> str:
'''simple docstring'''
__UpperCamelCase : Tuple = create_accelerator(even_batches=_lowercase)
verify_dataloader_batch_sizes(
_lowercase , dataset_size=3 , batch_size=1 , process_0_expected_batch_sizes=[1, 1] , process_1_expected_batch_sizes=[1] , )
verify_dataloader_batch_sizes(
_lowercase , dataset_size=7 , batch_size=2 , process_0_expected_batch_sizes=[2, 2] , process_1_expected_batch_sizes=[2, 1] , )
def _SCREAMING_SNAKE_CASE ( ) -> Union[str, Any]:
'''simple docstring'''
__UpperCamelCase : str = create_accelerator(even_batches=_lowercase)
__UpperCamelCase : str = torch.nn.Linear(1 , 1)
__UpperCamelCase : Any = accelerator.prepare(_lowercase)
__UpperCamelCase : Tuple = create_dataloader(_lowercase , dataset_size=3 , batch_size=1)
__UpperCamelCase : Tuple = []
with accelerator.join_uneven_inputs([ddp_model]):
for batch_idx, batch in enumerate(_lowercase):
__UpperCamelCase : List[str] = ddp_model(batch[0].float())
__UpperCamelCase : str = output.sum()
loss.backward()
batch_idxs.append(_lowercase)
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
assert batch_idxs == [0, 1]
elif accelerator.process_index == 1:
assert batch_idxs == [0]
def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : List[Any]) -> Tuple:
'''simple docstring'''
with warnings.catch_warnings(record=_lowercase) as w:
with accelerator.join_uneven_inputs([Mock()]):
pass
assert issubclass(w[-1].category , _lowercase)
assert "only supported for multi-GPU" in str(w[-1].message)
def _SCREAMING_SNAKE_CASE ( ) -> str:
'''simple docstring'''
__UpperCamelCase : Union[str, Any] = True
__UpperCamelCase : Dict = False
__UpperCamelCase : Any = create_accelerator(even_batches=_lowercase)
__UpperCamelCase : Optional[Any] = torch.nn.Linear(1 , 1)
__UpperCamelCase : List[str] = accelerator.prepare(_lowercase)
__UpperCamelCase : Optional[Any] = create_dataloader(_lowercase , dataset_size=3 , batch_size=1)
__UpperCamelCase : List[Any] = create_dataloader(_lowercase , dataset_size=3 , batch_size=1)
with accelerator.join_uneven_inputs([ddp_model] , even_batches=_lowercase):
__UpperCamelCase : Union[str, Any] = train_dl.batch_sampler.even_batches
__UpperCamelCase : Optional[int] = valid_dl.batch_sampler.even_batches
assert train_dl_overridden_value == overridden_even_batches
assert valid_dl_overridden_value == overridden_even_batches
assert train_dl.batch_sampler.even_batches == default_even_batches
assert valid_dl.batch_sampler.even_batches == default_even_batches
def _SCREAMING_SNAKE_CASE ( ) -> Tuple:
'''simple docstring'''
__UpperCamelCase : List[Any] = True
__UpperCamelCase : Union[str, Any] = False
__UpperCamelCase : str = create_accelerator(even_batches=_lowercase)
__UpperCamelCase : Optional[Any] = torch.nn.Linear(1 , 1)
__UpperCamelCase : Union[str, Any] = accelerator.prepare(_lowercase)
create_dataloader(_lowercase , dataset_size=3 , batch_size=1 , iterable=_lowercase)
__UpperCamelCase : Dict = create_dataloader(_lowercase , dataset_size=3 , batch_size=1)
with warnings.catch_warnings():
warnings.filterwarnings("ignore")
try:
with accelerator.join_uneven_inputs([ddp_model] , even_batches=_lowercase):
__UpperCamelCase : Optional[int] = batch_dl.batch_sampler.even_batches
except AttributeError:
# ensure attribute error is not raised when processing iterable dl
raise AssertionError
assert batch_dl_overridden_value == overridden_even_batches
assert batch_dl.batch_sampler.even_batches == default_even_batches
def _SCREAMING_SNAKE_CASE ( ) -> List[str]:
'''simple docstring'''
__UpperCamelCase : Optional[int] = create_accelerator()
__UpperCamelCase : Optional[int] = torch.nn.Linear(1 , 1)
__UpperCamelCase : Tuple = accelerator.prepare(_lowercase)
create_dataloader(_lowercase , dataset_size=3 , batch_size=1 , iterable=_lowercase)
with warnings.catch_warnings(record=_lowercase) as w:
with accelerator.join_uneven_inputs([ddp_model] , even_batches=_lowercase):
pass
assert issubclass(w[-1].category , _lowercase)
assert "only supported for map-style datasets" in str(w[-1].message)
def _SCREAMING_SNAKE_CASE ( ) -> List[str]:
'''simple docstring'''
__UpperCamelCase : Union[str, Any] = create_accelerator()
accelerator.print("Test that even_batches variable ensures uniform batches across processes")
test_default_ensures_even_batch_sizes()
accelerator.print("Run tests with even_batches disabled")
test_can_disable_even_batches()
accelerator.print("Test joining uneven inputs")
test_can_join_uneven_inputs()
accelerator.print("Test overriding even_batches when joining uneven inputs")
test_join_can_override_even_batches()
accelerator.print("Test overriding even_batches for mixed dataloader types")
test_join_can_override_for_mixed_type_dataloaders()
accelerator.print("Test overriding even_batches raises a warning for iterable dataloaders")
test_join_raises_warning_for_iterable_when_overriding_even_batches()
accelerator.print("Test join with non DDP distributed raises warning")
__UpperCamelCase : List[Any] = accelerator.state.distributed_type
__UpperCamelCase : List[Any] = DistributedType.FSDP
test_join_raises_warning_for_non_ddp_distributed(_lowercase)
__UpperCamelCase : str = original_state
if __name__ == "__main__":
main() | 557 |
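
# A back-of-envelope model (not part of the test script) of the batch shapes the
# assertions above expect. The contiguous split below is an assumption that
# reproduces the per-rank sample counts; Accelerate's actual sharding may
# interleave indices differently.
import math

def per_rank_batches(dataset_size, batch_size, num_ranks, even_batches):
    if even_batches:
        counts = [math.ceil(dataset_size / num_ranks)] * num_ranks  # padded
    else:
        base, rem = divmod(dataset_size, num_ranks)
        counts = [base + (1 if r < rem else 0) for r in range(num_ranks)]
    return [[min(batch_size, c - i) for i in range(0, c, batch_size)] for c in counts]

print(per_rank_batches(3, 1, 2, True))   # [[1, 1], [1, 1]]
print(per_rank_batches(3, 1, 2, False))  # [[1, 1], [1]]
print(per_rank_batches(7, 2, 2, True))   # [[2, 2], [2, 2]]
print(per_rank_batches(7, 2, 2, False))  # [[2, 2], [2, 1]]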
"""simple docstring"""
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
SCREAMING_SNAKE_CASE_ = abspath(join(dirname(dirname(__file__)), 'src'))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='ignore', category=FutureWarning)
def __snake_case ( _lowercase ):
"""simple docstring"""
from diffusers.utils.testing_utils import pytest_addoption_shared
pytest_addoption_shared(_lowercase )
def __snake_case ( _lowercase ):
"""simple docstring"""
from diffusers.utils.testing_utils import pytest_terminal_summary_main
UpperCamelCase = terminalreporter.config.getoption('''--make-reports''' )
if make_reports:
pytest_terminal_summary_main(_lowercase ,id=_lowercase ) | 34 | 0 |
import os
import re
import warnings
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_ta import TaTokenizer
else:
UpperCAmelCase_ : Any = None
UpperCAmelCase_ : str = logging.get_logger(__name__)
UpperCAmelCase_ : Any = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
UpperCAmelCase_ : Dict = {
"vocab_file": {
"t5-small": "https://huggingface.co/t5-small/resolve/main/spiece.model",
"t5-base": "https://huggingface.co/t5-base/resolve/main/spiece.model",
"t5-large": "https://huggingface.co/t5-large/resolve/main/spiece.model",
"t5-3b": "https://huggingface.co/t5-3b/resolve/main/spiece.model",
"t5-11b": "https://huggingface.co/t5-11b/resolve/main/spiece.model",
},
"tokenizer_file": {
"t5-small": "https://huggingface.co/t5-small/resolve/main/tokenizer.json",
"t5-base": "https://huggingface.co/t5-base/resolve/main/tokenizer.json",
"t5-large": "https://huggingface.co/t5-large/resolve/main/tokenizer.json",
"t5-3b": "https://huggingface.co/t5-3b/resolve/main/tokenizer.json",
"t5-11b": "https://huggingface.co/t5-11b/resolve/main/tokenizer.json",
},
}
# TODO(PVP) - this should be removed in Transformers v5
UpperCAmelCase_ : Optional[int] = {
"t5-small": 512,
"t5-base": 512,
"t5-large": 512,
"t5-3b": 512,
"t5-11b": 512,
}
class UpperCamelCase ( lowerCamelCase_ ):
lowerCAmelCase : str = VOCAB_FILES_NAMES
lowerCAmelCase : Tuple = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase : int = ["""input_ids""", """attention_mask"""]
lowerCAmelCase : Optional[Any] = TaTokenizer
lowerCAmelCase : Union[str, Any] = []
def __init__( self , UpperCAmelCase__=None , UpperCAmelCase__=None , UpperCAmelCase__="</s>" , UpperCAmelCase__="<unk>" , UpperCAmelCase__="<pad>" , UpperCAmelCase__=100 , UpperCAmelCase__=None , **UpperCAmelCase__ , ):
# Add extra_ids to the special token list
if extra_ids > 0 and additional_special_tokens is None:
A__ = [F"""<extra_id_{i}>""" for i in range(lowerCamelCase_ )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra special tokens
A__ = len(set(filter(lambda UpperCAmelCase__ : bool("extra_id_" in str(lowerCamelCase_ ) ) , lowerCamelCase_ ) ) )
if extra_tokens != extra_ids:
raise ValueError(
F"""Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"""
" provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids"
" tokens" )
super().__init__(
lowerCamelCase_ , tokenizer_file=lowerCamelCase_ , eos_token=lowerCamelCase_ , unk_token=lowerCamelCase_ , pad_token=lowerCamelCase_ , extra_ids=lowerCamelCase_ , additional_special_tokens=lowerCamelCase_ , **lowerCamelCase_ , )
A__ = vocab_file
A__ = False if not self.vocab_file else True
A__ = extra_ids
@staticmethod
def __A ( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ):
if pretrained_model_name_or_path in TaTokenizerFast.max_model_input_sizes:
A__ = TaTokenizerFast.max_model_input_sizes[pretrained_model_name_or_path]
if init_max_model_length is not None and init_max_model_length != max_model_length:
return init_max_model_length
elif init_max_model_length is None:
warnings.warn(
"This tokenizer was incorrectly instantiated with a model max length of"
F""" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this"""
" behavior is kept to avoid breaking backwards compatibility when padding/encoding with"
" `truncation is True`.\n- Be aware that you SHOULD NOT rely on"
F""" {pretrained_model_name_or_path} automatically truncating your input to"""
F""" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences"""
F""" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with"""
" `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please"
" instantiate this tokenizer with `model_max_length` set to your preferred value." , lowerCamelCase_ , )
return max_model_length
def __A ( self , UpperCAmelCase__ , UpperCAmelCase__ = None ):
if not self.can_save_slow_tokenizer:
raise ValueError(
"Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
"tokenizer." )
if not os.path.isdir(lowerCamelCase_ ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
A__ = os.path.join(
lowerCamelCase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCamelCase_ ):
copyfile(self.vocab_file , lowerCamelCase_ )
logger.info(F"""Copy vocab file to {out_vocab_file}""" )
return (out_vocab_file,)
def __A ( self , UpperCAmelCase__ , UpperCAmelCase__ = None ):
A__ = token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return self.prefix_tokens + token_ids_a
else:
A__ = token_ids_a + [self.eos_token_id]
return self.prefix_tokens + token_ids_a + token_ids_a
def __A ( self , UpperCAmelCase__ , UpperCAmelCase__ = None ):
A__ = [self.eos_token_id]
if token_ids_a is None:
return len(token_ids_a + eos ) * [0]
return len(token_ids_a + eos + token_ids_a + eos ) * [0]
def __A ( self ):
return list(
set(filter(lambda UpperCAmelCase__ : bool(re.search(R"<extra_id_\d+>" , lowerCamelCase_ ) ) is not None , self.additional_special_tokens ) ) )
def __A ( self ):
return [self.convert_tokens_to_ids(lowerCamelCase_ ) for token in self.get_sentinel_tokens()]
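
# A small sketch (not from the snippet) of the sentinel-token convention the T5
# tokenizer above manages: extra_ids=N appends <extra_id_0> .. <extra_id_{N-1}>
# to the vocabulary, and get_sentinel_tokens recovers them with the same regex.
import re

extra_ids = 3
sentinels = [f"<extra_id_{i}>" for i in range(extra_ids)]
assert all(re.search(r"<extra_id_\d+>", t) for t in sentinels)
print(sentinels)  # ['<extra_id_0>', '<extra_id_1>', '<extra_id_2>']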
| 491 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
def __init__( self , *lowerCamelCase_ , **lowerCamelCase_) -> None:
warnings.warn(
'''The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
''' Please use MobileViTImageProcessor instead.''' , lowerCamelCase_ , )
super().__init__(*lowerCamelCase_ , **lowerCamelCase_) | 34 | 0 |
from typing import Callable, Dict, Optional, Tuple
import torch
from torch import nn
from torch.distributions import (
AffineTransform,
Distribution,
Independent,
NegativeBinomial,
Normal,
StudentT,
TransformedDistribution,
)
class _lowercase ( lowerCamelCase_ ):
"""simple docstring"""
def __init__( self , UpperCAmelCase , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=0 ):
'''simple docstring'''
_lowercase = 1.0 if scale is None else scale
_lowercase = 0.0 if loc is None else loc
super().__init__(lowerCamelCase_ , [AffineTransform(loc=self.loc , scale=self.scale , event_dim=lowerCamelCase_ )] )
@property
def _UpperCAmelCase ( self ):
'''simple docstring'''
return self.base_dist.mean * self.scale + self.loc
@property
def _UpperCAmelCase ( self ):
'''simple docstring'''
return self.base_dist.variance * self.scale**2
@property
def _UpperCAmelCase ( self ):
'''simple docstring'''
return self.variance.sqrt()
class _lowercase ( nn.Module ):
"""simple docstring"""
def __init__( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ):
'''simple docstring'''
super().__init__(**lowerCamelCase_ )
_lowercase = args_dim
_lowercase = nn.ModuleList([nn.Linear(lowerCamelCase_ , lowerCamelCase_ ) for dim in args_dim.values()] )
_lowercase = domain_map
def _UpperCAmelCase ( self , UpperCAmelCase ):
'''simple docstring'''
_lowercase = [proj(lowerCamelCase_ ) for proj in self.proj]
return self.domain_map(*lowerCamelCase_ )
class _lowercase ( nn.Module ):
"""simple docstring"""
def __init__( self , UpperCAmelCase ):
'''simple docstring'''
super().__init__()
_lowercase = function
def _UpperCAmelCase ( self , UpperCAmelCase , *UpperCAmelCase ):
'''simple docstring'''
return self.function(lowerCamelCase_ , *lowerCamelCase_ )
class _lowercase :
"""simple docstring"""
lowerCAmelCase__ = 42
lowerCAmelCase__ = 42
lowerCAmelCase__ = 42
def __init__( self , UpperCAmelCase = 1 ):
'''simple docstring'''
_lowercase = dim
_lowercase = {k: dim * self.args_dim[k] for k in self.args_dim}
def _UpperCAmelCase ( self , UpperCAmelCase ):
'''simple docstring'''
if self.dim == 1:
return self.distribution_class(*lowerCamelCase_ )
else:
return Independent(self.distribution_class(*lowerCamelCase_ ) , 1 )
def _UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase = None , UpperCAmelCase = None , ):
'''simple docstring'''
_lowercase = self._base_distribution(lowerCamelCase_ )
if loc is None and scale is None:
return distr
else:
return AffineTransformed(lowerCamelCase_ , loc=lowerCamelCase_ , scale=lowerCamelCase_ , event_dim=self.event_dim )
@property
def _UpperCAmelCase ( self ):
'''simple docstring'''
return () if self.dim == 1 else (self.dim,)
@property
def _UpperCAmelCase ( self ):
'''simple docstring'''
return len(self.event_shape )
@property
def _UpperCAmelCase ( self ):
'''simple docstring'''
return 0.0
def _UpperCAmelCase ( self , UpperCAmelCase ):
'''simple docstring'''
return ParameterProjection(
in_features=lowerCamelCase_ , args_dim=self.args_dim , domain_map=LambdaLayer(self.domain_map ) , )
def _UpperCAmelCase ( self , *UpperCAmelCase ):
'''simple docstring'''
raise NotImplementedError()
@staticmethod
def _UpperCAmelCase ( UpperCAmelCase ):
'''simple docstring'''
return (x + torch.sqrt(torch.square(lowerCamelCase_ ) + 4.0 )) / 2.0
class _lowercase ( lowerCamelCase_ ):
"""simple docstring"""
lowerCAmelCase__ = {'df': 1, 'loc': 1, 'scale': 1}
lowerCAmelCase__ = StudentT
@classmethod
def _UpperCAmelCase ( cls , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ):
'''simple docstring'''
_lowercase = cls.squareplus(lowerCamelCase_ ).clamp_min(torch.finfo(scale.dtype ).eps )
_lowercase = 2.0 + cls.squareplus(lowerCamelCase_ )
return df.squeeze(-1 ), loc.squeeze(-1 ), scale.squeeze(-1 )
class _lowercase ( lowerCamelCase_ ):
"""simple docstring"""
lowerCAmelCase__ = {'loc': 1, 'scale': 1}
lowerCAmelCase__ = Normal
@classmethod
def _UpperCAmelCase ( cls , UpperCAmelCase , UpperCAmelCase ):
'''simple docstring'''
_lowercase = cls.squareplus(lowerCamelCase_ ).clamp_min(torch.finfo(scale.dtype ).eps )
return loc.squeeze(-1 ), scale.squeeze(-1 )
class _lowercase ( lowerCamelCase_ ):
"""simple docstring"""
lowerCAmelCase__ = {'total_count': 1, 'logits': 1}
lowerCAmelCase__ = NegativeBinomial
@classmethod
def _UpperCAmelCase ( cls , UpperCAmelCase , UpperCAmelCase ):
'''simple docstring'''
_lowercase = cls.squareplus(lowerCamelCase_ )
return total_count.squeeze(-1 ), logits.squeeze(-1 )
def _UpperCAmelCase ( self , UpperCAmelCase ):
'''simple docstring'''
_lowercase , _lowercase = distr_args
if self.dim == 1:
return self.distribution_class(total_count=lowerCamelCase_ , logits=lowerCamelCase_ )
else:
return Independent(self.distribution_class(total_count=lowerCamelCase_ , logits=lowerCamelCase_ ) , 1 )
def _UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase = None , UpperCAmelCase = None ):
'''simple docstring'''
_lowercase , _lowercase = distr_args
if scale is not None:
# See scaling property of Gamma.
logits += scale.log()
return self._base_distribution((total_count, logits) )
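
# A numeric check (not part of the snippet) of the log-scale trick above: for
# torch's NegativeBinomial(total_count, logits), mean = total_count * exp(logits),
# so adding log(scale) to the logits multiplies the mean by scale.
import torch
from torch.distributions import NegativeBinomial

tc, lg, scale = torch.tensor(5.0), torch.tensor(-0.3), torch.tensor(2.0)
base = NegativeBinomial(total_count=tc, logits=lg)
scaled = NegativeBinomial(total_count=tc, logits=lg + scale.log())
assert torch.isclose(scaled.mean, base.mean * scale)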
| 398 |
"""simple docstring"""
def __snake_case ( _lowercase ):
"""simple docstring"""
UpperCamelCase = [0 for i in range(len(_lowercase ) )]
# initialize interval's left pointer and right pointer
UpperCamelCase , UpperCamelCase = 0, 0
for i in range(1 ,len(_lowercase ) ):
# case when current index is inside the interval
if i <= right_pointer:
UpperCamelCase = min(right_pointer - i + 1 ,z_result[i - left_pointer] )
UpperCamelCase = min_edge
while go_next(_lowercase ,_lowercase ,_lowercase ):
z_result[i] += 1
        # if the new index's result extends the interval further right,
        # we have to update left_pointer and right_pointer
if i + z_result[i] - 1 > right_pointer:
UpperCamelCase , UpperCamelCase = i, i + z_result[i] - 1
return z_result
def __snake_case ( _lowercase ,_lowercase ,_lowercase ):
"""simple docstring"""
return i + z_result[i] < len(_lowercase ) and s[z_result[i]] == s[i + z_result[i]]
def __snake_case ( _lowercase ,_lowercase ):
"""simple docstring"""
UpperCamelCase = 0
    # concatenate 'pattern' and 'input_str' and call z_function
    # on the concatenated string
UpperCamelCase = z_function(pattern + input_str )
for val in z_result:
        # if the value is greater than or equal to the length of the pattern,
        # this index is the starting position of a substring
        # equal to the pattern string
if val >= len(_lowercase ):
answer += 1
return answer
if __name__ == "__main__":
import doctest
doctest.testmod() | 34 | 0 |
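
# A worked example (not part of the snippet) of what the Z-array encodes: z[i] is
# the length of the longest common prefix of s and s[i:], with z[0] left as 0
# here. The O(n^2) reference below computes the same values as the O(n) version.
def z_naive(s):
    z = [0] * len(s)
    for i in range(1, len(s)):
        while i + z[i] < len(s) and s[z[i]] == s[i + z[i]]:
            z[i] += 1
    return z

assert z_naive("aaabaab") == [0, 2, 1, 0, 2, 1, 0]
# pattern counting as above: "ab" occurs twice in "cabxab", and z("ab" + "cabxab")
# has exactly two entries >= len("ab")
assert sum(v >= 2 for v in z_naive("ab" + "cabxab")) == 2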
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCAmelCase__ :List[Any] = logging.get_logger(__name__)
UpperCAmelCase__ :Any = {
"""facebook/xmod-base""": """https://huggingface.co/facebook/xmod-base/resolve/main/config.json""",
"""facebook/xmod-large-prenorm""": """https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json""",
"""facebook/xmod-base-13-125k""": """https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json""",
"""facebook/xmod-base-30-125k""": """https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json""",
"""facebook/xmod-base-30-195k""": """https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json""",
"""facebook/xmod-base-60-125k""": """https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json""",
"""facebook/xmod-base-60-265k""": """https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json""",
"""facebook/xmod-base-75-125k""": """https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json""",
"""facebook/xmod-base-75-269k""": """https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json""",
}
class SCREAMING_SNAKE_CASE ( lowerCamelCase_ ):
snake_case__ : Union[str, Any] = 'xmod'
def __init__( self : List[str] , A__ : List[Any]=30522 , A__ : str=768 , A__ : List[Any]=12 , A__ : Any=12 , A__ : Optional[int]=3072 , A__ : str="gelu" , A__ : int=0.1 , A__ : Optional[int]=0.1 , A__ : str=512 , A__ : str=2 , A__ : Optional[int]=0.02 , A__ : str=1e-1_2 , A__ : Optional[Any]=1 , A__ : List[str]=0 , A__ : List[Any]=2 , A__ : Union[str, Any]="absolute" , A__ : int=True , A__ : Union[str, Any]=None , A__ : Optional[int]=False , A__ : Tuple=2 , A__ : str=False , A__ : Optional[Any]=True , A__ : Optional[Any]=True , A__ : Dict=("en_XX",) , A__ : Dict=None , **A__ : List[Any] , ):
"""simple docstring"""
super().__init__(pad_token_id=lowerCamelCase_ , bos_token_id=lowerCamelCase_ , eos_token_id=lowerCamelCase_ , **lowerCamelCase_ )
__lowerCamelCase : List[str] = vocab_size
__lowerCamelCase : List[str] = hidden_size
__lowerCamelCase : Optional[int] = num_hidden_layers
__lowerCamelCase : Tuple = num_attention_heads
__lowerCamelCase : str = hidden_act
__lowerCamelCase : str = intermediate_size
__lowerCamelCase : str = hidden_dropout_prob
__lowerCamelCase : Any = attention_probs_dropout_prob
__lowerCamelCase : str = max_position_embeddings
__lowerCamelCase : Optional[int] = type_vocab_size
__lowerCamelCase : List[str] = initializer_range
__lowerCamelCase : Union[str, Any] = layer_norm_eps
__lowerCamelCase : Optional[Any] = position_embedding_type
__lowerCamelCase : List[str] = use_cache
__lowerCamelCase : Optional[Any] = classifier_dropout
__lowerCamelCase : List[str] = pre_norm
__lowerCamelCase : int = adapter_reduction_factor
__lowerCamelCase : Tuple = adapter_layer_norm
__lowerCamelCase : List[str] = adapter_reuse_layer_norm
__lowerCamelCase : List[Any] = ln_before_adapter
__lowerCamelCase : Union[str, Any] = list(lowerCamelCase_ )
__lowerCamelCase : str = default_language
class SCREAMING_SNAKE_CASE ( lowerCamelCase_ ):
@property
def a_ ( self : Tuple ):
"""simple docstring"""
if self.task == "multiple-choice":
__lowerCamelCase : List[str] = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
__lowerCamelCase : str = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
] )
| 150 |
"""simple docstring"""
import importlib.metadata
import warnings
from copy import deepcopy
from packaging import version
from ..utils import logging
from .import_utils import is_accelerate_available, is_bitsandbytes_available
if is_bitsandbytes_available():
import bitsandbytes as bnb
import torch
import torch.nn as nn
from ..pytorch_utils import ConvaD
if is_accelerate_available():
from accelerate import init_empty_weights
from accelerate.utils import find_tied_parameters
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
def __snake_case ( _lowercase ,_lowercase ,_lowercase ,_lowercase=None ,_lowercase=None ):
"""simple docstring"""
if "." in tensor_name:
UpperCamelCase = tensor_name.split('''.''' )
for split in splits[:-1]:
UpperCamelCase = getattr(_lowercase ,_lowercase )
if new_module is None:
raise ValueError(f'{module} has no attribute {split}.' )
UpperCamelCase = new_module
UpperCamelCase = splits[-1]
if tensor_name not in module._parameters and tensor_name not in module._buffers:
raise ValueError(f'{module} does not have a parameter or a buffer named {tensor_name}.' )
UpperCamelCase = tensor_name in module._buffers
UpperCamelCase = getattr(_lowercase ,_lowercase )
if old_value.device == torch.device('''meta''' ) and device not in ["meta", torch.device('''meta''' )] and value is None:
raise ValueError(f'{tensor_name} is on the meta device, we need a `value` to put in on {device}.' )
UpperCamelCase = False
UpperCamelCase = False
if is_buffer or not is_bitsandbytes_available():
UpperCamelCase = False
UpperCamelCase = False
else:
UpperCamelCase = hasattr(bnb.nn ,'''Params4bit''' ) and isinstance(module._parameters[tensor_name] ,bnb.nn.Paramsabit )
UpperCamelCase = isinstance(module._parameters[tensor_name] ,bnb.nn.IntaParams )
if is_abit or is_abit:
UpperCamelCase = module._parameters[tensor_name]
if param.device.type != "cuda":
if value is None:
UpperCamelCase = old_value.to(_lowercase )
elif isinstance(_lowercase ,torch.Tensor ):
UpperCamelCase = value.to('''cpu''' )
if value.dtype == torch.inta:
UpperCamelCase = version.parse(importlib.metadata.version('''bitsandbytes''' ) ) > version.parse(
'''0.37.2''' )
if not is_abit_serializable:
raise ValueError(
'''Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. '''
'''Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`.''' )
else:
UpperCamelCase = torch.tensor(_lowercase ,device='''cpu''' )
# Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization.
# Since weights are saved in the correct "orientation", we skip transposing when loading.
if issubclass(module.source_cls ,_lowercase ) and fpaa_statistics is None:
UpperCamelCase = new_value.T
UpperCamelCase = old_value.__dict__
if is_abit:
UpperCamelCase = bnb.nn.IntaParams(_lowercase ,requires_grad=_lowercase ,**_lowercase ).to(_lowercase )
elif is_abit:
UpperCamelCase = bnb.nn.Paramsabit(_lowercase ,requires_grad=_lowercase ,**_lowercase ).to(_lowercase )
UpperCamelCase = new_value
if fpaa_statistics is not None:
setattr(module.weight ,'''SCB''' ,fpaa_statistics.to(_lowercase ) )
else:
if value is None:
UpperCamelCase = old_value.to(_lowercase )
elif isinstance(_lowercase ,torch.Tensor ):
UpperCamelCase = value.to(_lowercase )
else:
UpperCamelCase = torch.tensor(_lowercase ,device=_lowercase )
if is_buffer:
UpperCamelCase = new_value
else:
UpperCamelCase = nn.Parameter(_lowercase ,requires_grad=old_value.requires_grad )
UpperCamelCase = new_value
def __snake_case ( _lowercase ,_lowercase=None ,_lowercase=None ,_lowercase=None ,_lowercase=False ):
"""simple docstring"""
for name, module in model.named_children():
if current_key_name is None:
UpperCamelCase = []
current_key_name.append(_lowercase )
if (isinstance(_lowercase ,nn.Linear ) or isinstance(_lowercase ,_lowercase )) and name not in modules_to_not_convert:
# Check if the current key is not in the `modules_to_not_convert`
if not any(key in '''.'''.join(_lowercase ) for key in modules_to_not_convert ):
with init_empty_weights():
if isinstance(_lowercase ,_lowercase ):
UpperCamelCase , UpperCamelCase = module.weight.shape
else:
UpperCamelCase = module.in_features
UpperCamelCase = module.out_features
if quantization_config.quantization_method() == "llm_int8":
UpperCamelCase = bnb.nn.LinearabitLt(
_lowercase ,_lowercase ,module.bias is not None ,has_fpaa_weights=quantization_config.llm_inta_has_fpaa_weight ,threshold=quantization_config.llm_inta_threshold ,)
UpperCamelCase = True
else:
if (
quantization_config.llm_inta_skip_modules is not None
and name in quantization_config.llm_inta_skip_modules
):
pass
else:
UpperCamelCase = bnb.nn.Linearabit(
_lowercase ,_lowercase ,module.bias is not None ,quantization_config.bnb_abit_compute_dtype ,compress_statistics=quantization_config.bnb_abit_use_double_quant ,quant_type=quantization_config.bnb_abit_quant_type ,)
UpperCamelCase = True
# Store the module class in case we need to transpose the weight later
UpperCamelCase = type(_lowercase )
# Force requires grad to False to avoid unexpected errors
model._modules[name].requires_grad_(_lowercase )
if len(list(module.children() ) ) > 0:
UpperCamelCase , UpperCamelCase = _replace_with_bnb_linear(
_lowercase ,_lowercase ,_lowercase ,_lowercase ,has_been_replaced=_lowercase ,)
# Remove the last key for recursion
current_key_name.pop(-1 )
return model, has_been_replaced
def __snake_case ( _lowercase ,_lowercase=None ,_lowercase=None ,_lowercase=None ):
"""simple docstring"""
UpperCamelCase = ['''lm_head'''] if modules_to_not_convert is None else modules_to_not_convert
UpperCamelCase , UpperCamelCase = _replace_with_bnb_linear(
_lowercase ,_lowercase ,_lowercase ,_lowercase )
if not has_been_replaced:
logger.warning(
'''You are loading your model in 8bit or 4bit but no linear modules were found in your model.'''
''' Please double check your model architecture, or submit an issue on github if you think this is'''
''' a bug.''' )
return model
def __snake_case ( *_lowercase ,**_lowercase ):
"""simple docstring"""
warnings.warn(
'''`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead''' ,_lowercase ,)
return replace_with_bnb_linear(*_lowercase ,**_lowercase )
def __snake_case ( *_lowercase ,**_lowercase ):
"""simple docstring"""
warnings.warn(
'''`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use `set_module_quantized_tensor_to_device` instead''' ,_lowercase ,)
return set_module_quantized_tensor_to_device(*_lowercase ,**_lowercase )
def __snake_case ( _lowercase ):
"""simple docstring"""
    UpperCamelCase = deepcopy(_lowercase ) # this has 0 cost since it is done inside the `init_empty_weights` context manager
tied_model.tie_weights()
UpperCamelCase = find_tied_parameters(_lowercase )
# For compatibility with Accelerate < 0.18
if isinstance(_lowercase ,_lowercase ):
UpperCamelCase = sum(list(tied_params.values() ) ,[] ) + list(tied_params.keys() )
else:
UpperCamelCase = sum(_lowercase ,[] )
UpperCamelCase = len(_lowercase ) > 0
# Check if it is a base model
UpperCamelCase = not hasattr(_lowercase ,model.base_model_prefix )
# Ignore this for base models (BertModel, GPT2Model, etc.)
if (not has_tied_params) and is_base_model:
return []
# otherwise they have an attached head
UpperCamelCase = list(model.named_children() )
UpperCamelCase = [list_modules[-1][0]]
# add last module together with tied weights
UpperCamelCase = set(_lowercase ) - set(_lowercase )
UpperCamelCase = list(set(_lowercase ) ) + list(_lowercase )
# remove ".weight" from the keys
UpperCamelCase = ['''.weight''', '''.bias''']
UpperCamelCase = []
for name in list_untouched:
for name_to_remove in names_to_remove:
if name_to_remove in name:
UpperCamelCase = name.replace(_lowercase ,'''''' )
filtered_module_names.append(_lowercase )
return filtered_module_names | 34 | 0 |
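The helpers above recursively walk named_children and swap nn.Linear layers for bitsandbytes quantized variants. Below is a minimal sketch of the same recursion pattern, assuming a hypothetical QuantLinear stand-in rather than the real bitsandbytes API:

import torch.nn as nn

class QuantLinear(nn.Linear):
    """Hypothetical stand-in for a quantized linear layer (not the bitsandbytes API)."""

def replace_linear(model: nn.Module, skip=()) -> nn.Module:
    # Recursively swap nn.Linear children for QuantLinear, as the helper above does.
    for name, module in model.named_children():
        if isinstance(module, nn.Linear) and name not in skip:
            model._modules[name] = QuantLinear(
                module.in_features, module.out_features, bias=module.bias is not None
            )
        elif len(list(module.children())) > 0:
            replace_linear(module, skip)  # recurse into composite modules
    return model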
'''simple docstring'''
import math
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput
class __snake_case ( lowerCamelCase_ , lowerCamelCase_):
_lowerCAmelCase = 1
@register_to_config
def __init__( self, A = 1000, A = None ):
"""simple docstring"""
self.set_timesteps(lowerCamelCase_ )
# standard deviation of the initial noise distribution
lowerCamelCase : List[str] = 1.0
# For now we only support F-PNDM, i.e. the runge-kutta method
# For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
# mainly at formula (9), (12), (13) and the Algorithm 2.
lowerCamelCase : Optional[int] = 4
# running values
lowerCamelCase : Optional[int] = []
def UpperCAmelCase_ ( self, A, A = None ):
"""simple docstring"""
lowerCamelCase : str = num_inference_steps
lowerCamelCase : int = torch.linspace(1, 0, num_inference_steps + 1 )[:-1]
lowerCamelCase : List[Any] = torch.cat([steps, torch.tensor([0.0] )] )
if self.config.trained_betas is not None:
lowerCamelCase : List[str] = torch.tensor(self.config.trained_betas, dtype=torch.floataa )
else:
lowerCamelCase : List[Any] = torch.sin(steps * math.pi / 2 ) ** 2
lowerCamelCase : Any = (1.0 - self.betas**2) ** 0.5
lowerCamelCase : int = (torch.atana(self.betas, self.alphas ) / math.pi * 2)[:-1]
lowerCamelCase : Optional[Any] = timesteps.to(lowerCamelCase_ )
lowerCamelCase : Optional[Any] = []
def UpperCAmelCase_ ( self, A, A, A, A = True, ):
"""simple docstring"""
if self.num_inference_steps is None:
raise ValueError(
'Number of inference steps is \'None\', you need to run \'set_timesteps\' after creating the scheduler' )
lowerCamelCase : Dict = (self.timesteps == timestep).nonzero().item()
lowerCamelCase : Tuple = timestep_index + 1
lowerCamelCase : Tuple = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
self.ets.append(lowerCamelCase_ )
if len(self.ets ) == 1:
lowerCamelCase : int = self.ets[-1]
elif len(self.ets ) == 2:
lowerCamelCase : List[str] = (3 * self.ets[-1] - self.ets[-2]) / 2
elif len(self.ets ) == 3:
lowerCamelCase : List[str] = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12
else:
lowerCamelCase : int = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4])
lowerCamelCase : Optional[int] = self._get_prev_sample(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ )
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=lowerCamelCase_ )
def UpperCAmelCase_ ( self, A, *A, **A ):
"""simple docstring"""
return sample
def UpperCAmelCase_ ( self, A, A, A, A ):
"""simple docstring"""
lowerCamelCase : Dict = self.alphas[timestep_index]
lowerCamelCase : List[str] = self.betas[timestep_index]
lowerCamelCase : Tuple = self.alphas[prev_timestep_index]
lowerCamelCase : List[str] = self.betas[prev_timestep_index]
lowerCamelCase : int = (sample - sigma * ets) / max(lowerCamelCase_, 1e-8 )
lowerCamelCase : Union[str, Any] = next_alpha * pred + ets * next_sigma
return prev_sample
def __len__( self ):
"""simple docstring"""
return self.config.num_train_timesteps
| 320 |
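The class above follows the diffusers SchedulerMixin convention: set_timesteps builds the timestep grid and step consumes the multistep history kept in self.ets. A hedged sketch of the generic sampling loop; model and scheduler are placeholders for any compatible pair:

import torch

def sample(model, scheduler, shape=(1, 3, 32, 32), num_steps=50):
    # `model` and `scheduler` are placeholders for any diffusers-style pair.
    scheduler.set_timesteps(num_steps)
    x = torch.randn(shape)  # start from Gaussian noise
    for t in scheduler.timesteps:
        with torch.no_grad():
            model_output = model(x, t)
        # step() refines the sample using the multistep history in scheduler.ets
        x = scheduler.step(model_output, t, x).prev_sample
    return x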
"""simple docstring"""
from random import randint
from tempfile import TemporaryFile
import numpy as np
def __snake_case ( _lowercase ,_lowercase ,_lowercase ):
"""simple docstring"""
UpperCamelCase = 0
if start < end:
UpperCamelCase = randint(_lowercase ,_lowercase )
UpperCamelCase = a[end]
UpperCamelCase = a[pivot]
UpperCamelCase = temp
UpperCamelCase , UpperCamelCase = _in_place_partition(_lowercase ,_lowercase ,_lowercase )
count += _in_place_quick_sort(_lowercase ,_lowercase ,p - 1 )
count += _in_place_quick_sort(_lowercase ,p + 1 ,_lowercase )
return count
def __snake_case ( _lowercase ,_lowercase ,_lowercase ):
"""simple docstring"""
UpperCamelCase = 0
UpperCamelCase = randint(_lowercase ,_lowercase )
UpperCamelCase = a[end]
UpperCamelCase = a[pivot]
UpperCamelCase = temp
UpperCamelCase = start - 1
for index in range(_lowercase ,_lowercase ):
count += 1
if a[index] < a[end]: # check if current val is less than pivot value
UpperCamelCase = new_pivot_index + 1
UpperCamelCase = a[new_pivot_index]
UpperCamelCase = a[index]
UpperCamelCase = temp
UpperCamelCase = a[new_pivot_index + 1]
UpperCamelCase = a[end]
UpperCamelCase = temp
return new_pivot_index + 1, count
SCREAMING_SNAKE_CASE_ = TemporaryFile()
SCREAMING_SNAKE_CASE_ = 100 # 1000 elements are to be sorted
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = 0, 1 # mean and standard deviation
SCREAMING_SNAKE_CASE_ = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print('The array is')
print(X)
outfile.seek(0) # using the same array
SCREAMING_SNAKE_CASE_ = np.load(outfile)
SCREAMING_SNAKE_CASE_ = len(M) - 1
SCREAMING_SNAKE_CASE_ = _in_place_quick_sort(M, 0, r)
print(
'No of Comparisons for 100 elements selected from a standard normal distribution'
'is :'
)
print(z) | 34 | 0 |
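The row above is an in-place randomized quicksort that also counts comparisons. A de-obfuscated sketch of the same algorithm (random pivot plus Lomuto partition), assuming the count is meant to tally element comparisons:

import random

def quicksort_count(a, start, end):
    # Sorts a[start:end+1] in place; returns the number of comparisons made.
    if start >= end:
        return 0
    pivot = random.randint(start, end)
    a[pivot], a[end] = a[end], a[pivot]  # move the pivot value to the end
    boundary = start - 1
    count = 0
    for i in range(start, end):
        count += 1
        if a[i] < a[end]:
            boundary += 1
            a[boundary], a[i] = a[i], a[boundary]
    a[boundary + 1], a[end] = a[end], a[boundary + 1]  # put pivot in place
    p = boundary + 1
    count += quicksort_count(a, start, p - 1)
    count += quicksort_count(a, p + 1, end)
    return count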
'''simple docstring'''
import importlib.metadata
import warnings
from copy import deepcopy
from packaging import version
from ..utils import logging
from .import_utils import is_accelerate_available, is_bitsandbytes_available
if is_bitsandbytes_available():
import bitsandbytes as bnb
import torch
import torch.nn as nn
from ..pytorch_utils import ConvaD
if is_accelerate_available():
from accelerate import init_empty_weights
from accelerate.utils import find_tied_parameters
UpperCAmelCase_ : Tuple = logging.get_logger(__name__)
def A_ ( _lowerCAmelCase : List[str] , _lowerCAmelCase : int , _lowerCAmelCase : List[str] , _lowerCAmelCase : List[str]=None , _lowerCAmelCase : List[str]=None ):
"""simple docstring"""
if "." in tensor_name:
_lowerCamelCase : Optional[int] = tensor_name.split("." )
for split in splits[:-1]:
_lowerCamelCase : Tuple = getattr(_lowercase , _lowercase )
if new_module is None:
raise ValueError(F'{module} has no attribute {split}.' )
_lowerCamelCase : Tuple = new_module
_lowerCamelCase : str = splits[-1]
if tensor_name not in module._parameters and tensor_name not in module._buffers:
raise ValueError(F'{module} does not have a parameter or a buffer named {tensor_name}.' )
_lowerCamelCase : List[Any] = tensor_name in module._buffers
_lowerCamelCase : Optional[int] = getattr(_lowercase , _lowercase )
if old_value.device == torch.device("meta" ) and device not in ["meta", torch.device("meta" )] and value is None:
raise ValueError(F'{tensor_name} is on the meta device, we need a `value` to put in on {device}.' )
_lowerCamelCase : Optional[Any] = False
_lowerCamelCase : Tuple = False
if is_buffer or not is_bitsandbytes_available():
_lowerCamelCase : Union[str, Any] = False
_lowerCamelCase : Tuple = False
else:
_lowerCamelCase : List[Any] = hasattr(bnb.nn , "Params4bit" ) and isinstance(module._parameters[tensor_name] , bnb.nn.Paramsabit )
_lowerCamelCase : Dict = isinstance(module._parameters[tensor_name] , bnb.nn.IntaParams )
if is_abit or is_abit:
_lowerCamelCase : int = module._parameters[tensor_name]
if param.device.type != "cuda":
if value is None:
_lowerCamelCase : str = old_value.to(_lowercase )
elif isinstance(_lowercase , torch.Tensor ):
_lowerCamelCase : Optional[int] = value.to("cpu" )
if value.dtype == torch.inta:
_lowerCamelCase : Optional[Any] = version.parse(importlib.metadata.version("bitsandbytes" ) ) > version.parse(
"0.37.2" )
if not is_abit_serializable:
raise ValueError(
"Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. "
"Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`." )
else:
_lowerCamelCase : Any = torch.tensor(_lowercase , device="cpu" )
# Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization.
# Since weights are saved in the correct "orientation", we skip transposing when loading.
if issubclass(module.source_cls , _lowercase ) and fpaa_statistics is None:
_lowerCamelCase : Optional[int] = new_value.T
_lowerCamelCase : Dict = old_value.__dict__
if is_abit:
_lowerCamelCase : Dict = bnb.nn.IntaParams(_lowercase , requires_grad=_lowercase , **_lowercase ).to(_lowercase )
elif is_abit:
_lowerCamelCase : Any = bnb.nn.Paramsabit(_lowercase , requires_grad=_lowercase , **_lowercase ).to(_lowercase )
_lowerCamelCase : Dict = new_value
if fpaa_statistics is not None:
setattr(module.weight , "SCB" , fpaa_statistics.to(_lowercase ) )
else:
if value is None:
_lowerCamelCase : Dict = old_value.to(_lowercase )
elif isinstance(_lowercase , torch.Tensor ):
_lowerCamelCase : Union[str, Any] = value.to(_lowercase )
else:
_lowerCamelCase : List[str] = torch.tensor(_lowercase , device=_lowercase )
if is_buffer:
_lowerCamelCase : List[Any] = new_value
else:
_lowerCamelCase : Any = nn.Parameter(_lowercase , requires_grad=old_value.requires_grad )
_lowerCamelCase : Dict = new_value
def A_ ( _lowerCAmelCase : int , _lowerCAmelCase : List[str]=None , _lowerCAmelCase : List[Any]=None , _lowerCAmelCase : Any=None , _lowerCAmelCase : Dict=False ):
"""simple docstring"""
for name, module in model.named_children():
if current_key_name is None:
_lowerCamelCase : int = []
current_key_name.append(_lowercase )
if (isinstance(_lowercase , nn.Linear ) or isinstance(_lowercase , _lowercase )) and name not in modules_to_not_convert:
# Check if the current key is not in the `modules_to_not_convert`
if not any(key in ".".join(_lowercase ) for key in modules_to_not_convert ):
with init_empty_weights():
if isinstance(_lowercase , _lowercase ):
_lowerCamelCase , _lowerCamelCase : List[str] = module.weight.shape
else:
_lowerCamelCase : Optional[int] = module.in_features
_lowerCamelCase : Union[str, Any] = module.out_features
if quantization_config.quantization_method() == "llm_int8":
_lowerCamelCase : Optional[int] = bnb.nn.LinearabitLt(
_lowercase , _lowercase , module.bias is not None , has_fpaa_weights=quantization_config.llm_inta_has_fpaa_weight , threshold=quantization_config.llm_inta_threshold , )
_lowerCamelCase : str = True
else:
if (
quantization_config.llm_inta_skip_modules is not None
and name in quantization_config.llm_inta_skip_modules
):
pass
else:
_lowerCamelCase : Any = bnb.nn.Linearabit(
_lowercase , _lowercase , module.bias is not None , quantization_config.bnb_abit_compute_dtype , compress_statistics=quantization_config.bnb_abit_use_double_quant , quant_type=quantization_config.bnb_abit_quant_type , )
_lowerCamelCase : Optional[Any] = True
# Store the module class in case we need to transpose the weight later
_lowerCamelCase : Tuple = type(_lowercase )
# Force requires grad to False to avoid unexpected errors
model._modules[name].requires_grad_(_lowercase )
if len(list(module.children() ) ) > 0:
_lowerCamelCase , _lowerCamelCase : int = _replace_with_bnb_linear(
_lowercase , _lowercase , _lowercase , _lowercase , has_been_replaced=_lowercase , )
# Remove the last key for recursion
current_key_name.pop(-1 )
return model, has_been_replaced
def A_ ( _lowerCAmelCase : str , _lowerCAmelCase : Tuple=None , _lowerCAmelCase : Union[str, Any]=None , _lowerCAmelCase : List[Any]=None ):
"""simple docstring"""
_lowerCamelCase : List[str] = ["lm_head"] if modules_to_not_convert is None else modules_to_not_convert
_lowerCamelCase , _lowerCamelCase : Optional[Any] = _replace_with_bnb_linear(
_lowercase , _lowercase , _lowercase , _lowercase )
if not has_been_replaced:
logger.warning(
"You are loading your model in 8bit or 4bit but no linear modules were found in your model."
" Please double check your model architecture, or submit an issue on github if you think this is"
" a bug." )
return model
def A_ ( *_lowerCAmelCase : Union[str, Any] , **_lowerCAmelCase : List[str] ):
"""simple docstring"""
warnings.warn(
"`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead" , _lowercase , )
return replace_with_bnb_linear(*_lowercase , **_lowercase )
def A_ ( *_lowerCAmelCase : Union[str, Any] , **_lowerCAmelCase : Tuple ):
"""simple docstring"""
warnings.warn(
"`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use `set_module_quantized_tensor_to_device` instead" , _lowercase , )
return set_module_quantized_tensor_to_device(*_lowercase , **_lowercase )
def A_ ( _lowerCAmelCase : Optional[int] ):
"""simple docstring"""
_lowerCamelCase : List[Any] = deepcopy(_lowercase ) # this has 0 cost since it is done inside `init_empty_weights` context manager`
tied_model.tie_weights()
_lowerCamelCase : Optional[Any] = find_tied_parameters(_lowercase )
# For compatibility with Accelerate < 0.18
if isinstance(_lowercase , _lowercase ):
_lowerCamelCase : Dict = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() )
else:
_lowerCamelCase : Any = sum(_lowercase , [] )
_lowerCamelCase : int = len(_lowercase ) > 0
# Check if it is a base model
_lowerCamelCase : List[Any] = not hasattr(_lowercase , model.base_model_prefix )
# Ignore this for base models (BertModel, GPT2Model, etc.)
if (not has_tied_params) and is_base_model:
return []
# otherwise they have an attached head
_lowerCamelCase : Union[str, Any] = list(model.named_children() )
_lowerCamelCase : str = [list_modules[-1][0]]
# add last module together with tied weights
_lowerCamelCase : Any = set(_lowercase ) - set(_lowercase )
_lowerCamelCase : Any = list(set(_lowercase ) ) + list(_lowercase )
# remove ".weight" from the keys
_lowerCamelCase : List[str] = [".weight", ".bias"]
_lowerCamelCase : Any = []
for name in list_untouched:
for name_to_remove in names_to_remove:
if name_to_remove in name:
_lowerCamelCase : Optional[int] = name.replace(_lowercase , "" )
filtered_module_names.append(_lowercase )
return filtered_module_names | 44 |
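The first helper above resolves a dotted tensor name such as "encoder.layer.0.attention.weight" by walking submodules with getattr. A minimal sketch of that traversal; note that nn.Sequential registers children under string keys like "0", so getattr handles integer-looking parts too:

import torch.nn as nn

def resolve(module: nn.Module, dotted: str):
    # Walk "a.b.0.c" down to the owning submodule; return (submodule, leaf_name).
    *parents, leaf = dotted.split(".")
    for part in parents:
        module = getattr(module, part)
    return module, leaf

net = nn.Sequential(nn.Linear(4, 4))
owner, leaf = resolve(net, "0.weight")
assert owner is net[0] and leaf == "weight"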
"""simple docstring"""
import os
import sys
import unittest
SCREAMING_SNAKE_CASE_ = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
SCREAMING_SNAKE_CASE_ = os.path.join(git_repo_path, 'src', 'transformers')
SCREAMING_SNAKE_CASE_ = '\n{0} = None\n'
SCREAMING_SNAKE_CASE_ = '\nclass {0}(metaclass=DummyObject):\n _backends = {1}\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, {1})\n'
SCREAMING_SNAKE_CASE_ = '\ndef {0}(*args, **kwargs):\n requires_backends({0}, {1})\n'
class snake_case_ ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase__ ( self) -> List[Any]:
UpperCamelCase = find_backend(''' _import_structure["models.albert"].append("AlbertTokenizerFast")''')
self.assertIsNone(lowerCamelCase_)
UpperCamelCase = find_backend(''' if not is_tokenizers_available():''')
self.assertEqual(lowerCamelCase_ , '''tokenizers''')
UpperCamelCase = find_backend(''' if not is_tensorflow_text_available():''')
self.assertEqual(lowerCamelCase_ , '''tensorflow_text''')
UpperCamelCase = find_backend(''' if not (is_sentencepiece_available() and is_tokenizers_available()):''')
self.assertEqual(lowerCamelCase_ , '''sentencepiece_and_tokenizers''')
UpperCamelCase = find_backend(
''' if not (is_sentencepiece_available() and is_tensorflow_text_available()):''')
self.assertEqual(lowerCamelCase_ , '''sentencepiece_and_tensorflow_text''')
UpperCamelCase = find_backend(
''' if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):''')
self.assertEqual(lowerCamelCase_ , '''sentencepiece_and_tokenizers_and_vision''')
def UpperCAmelCase__ ( self) -> int:
UpperCamelCase = read_init()
# We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
self.assertIn('''torch''' , lowerCamelCase_)
self.assertIn('''tensorflow_text''' , lowerCamelCase_)
self.assertIn('''sentencepiece_and_tokenizers''' , lowerCamelCase_)
# Likewise, we can't assert on the exact content of a key
self.assertIn('''BertModel''' , objects['''torch'''])
self.assertIn('''TFBertModel''' , objects['''tf'''])
self.assertIn('''FlaxBertModel''' , objects['''flax'''])
self.assertIn('''BertModel''' , objects['''torch'''])
self.assertIn('''TFBertTokenizer''' , objects['''tensorflow_text'''])
self.assertIn('''convert_slow_tokenizer''' , objects['''sentencepiece_and_tokenizers'''])
def UpperCAmelCase__ ( self) -> Optional[int]:
UpperCamelCase = create_dummy_object('''CONSTANT''' , '''\'torch\'''')
self.assertEqual(lowerCamelCase_ , '''\nCONSTANT = None\n''')
UpperCamelCase = create_dummy_object('''function''' , '''\'torch\'''')
self.assertEqual(
lowerCamelCase_ , '''\ndef function(*args, **kwargs):\n requires_backends(function, \'torch\')\n''')
UpperCamelCase = '''
class FakeClass(metaclass=DummyObject):
_backends = \'torch\'
def __init__(self, *args, **kwargs):
requires_backends(self, \'torch\')
'''
UpperCamelCase = create_dummy_object('''FakeClass''' , '''\'torch\'''')
self.assertEqual(lowerCamelCase_ , lowerCamelCase_)
def UpperCAmelCase__ ( self) -> int:
UpperCamelCase = '''# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends
CONSTANT = None
def function(*args, **kwargs):
requires_backends(function, ["torch"])
class FakeClass(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
'''
UpperCamelCase = create_dummy_files({'''torch''': ['''CONSTANT''', '''function''', '''FakeClass''']})
self.assertEqual(dummy_files['''torch'''] , lowerCamelCase_) | 34 | 0 |
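The tests above exercise check_dummies.find_backend, which extracts backend names from "if not is_xxx_available():" guard lines. A hedged regex sketch of that parsing, not the actual transformers implementation:

import re

_GUARD = re.compile(r"is_(\w+)_available\(\)")

def find_backend_sketch(line: str):
    # Returns "tokenizers", "sentencepiece_and_tokenizers", ... or None.
    if "if not" not in line:
        return None
    backends = _GUARD.findall(line)
    return "_and_".join(backends) if backends else None

assert find_backend_sketch("    if not is_tokenizers_available():") == "tokenizers"
assert find_backend_sketch(
    "    if not (is_sentencepiece_available() and is_tokenizers_available()):"
) == "sentencepiece_and_tokenizers"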
import inspect
import unittest
from transformers import ViTMSNConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMSNForImageClassification, ViTMSNModel
from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class lowerCamelCase__ :
"""simple docstring"""
def __init__(self , __a , __a=13 , __a=30 , __a=2 , __a=3 , __a=True , __a=True , __a=32 , __a=5 , __a=4 , __a=37 , __a="gelu" , __a=0.1 , __a=0.1 , __a=10 , __a=0.02 , __a=None , ):
'''simple docstring'''
lowerCamelCase = parent
lowerCamelCase = batch_size
lowerCamelCase = image_size
lowerCamelCase = patch_size
lowerCamelCase = num_channels
lowerCamelCase = is_training
lowerCamelCase = use_labels
lowerCamelCase = hidden_size
lowerCamelCase = num_hidden_layers
lowerCamelCase = num_attention_heads
lowerCamelCase = intermediate_size
lowerCamelCase = hidden_act
lowerCamelCase = hidden_dropout_prob
lowerCamelCase = attention_probs_dropout_prob
lowerCamelCase = type_sequence_label_size
lowerCamelCase = initializer_range
lowerCamelCase = scope
# in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
lowerCamelCase = (image_size // patch_size) ** 2
lowerCamelCase = num_patches + 1
def _a (self ):
'''simple docstring'''
lowerCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCamelCase = None
if self.use_labels:
lowerCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase = self.get_config()
return config, pixel_values, labels
def _a (self ):
'''simple docstring'''
return ViTMSNConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , )
def _a (self , __a , __a , __a ):
'''simple docstring'''
lowerCamelCase = ViTMSNModel(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase = model(lowerCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _a (self , __a , __a , __a ):
'''simple docstring'''
lowerCamelCase = self.type_sequence_label_size
lowerCamelCase = ViTMSNForImageClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase = model(lowerCamelCase_ , labels=lowerCamelCase_ )
print("Pixel and labels shape: {pixel_values.shape}, {labels.shape}" )
print("Labels: {labels}" )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
lowerCamelCase = 1
lowerCamelCase = ViTMSNForImageClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowerCamelCase = model(lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _a (self ):
'''simple docstring'''
lowerCamelCase = self.prepare_config_and_inputs()
lowerCamelCase , lowerCamelCase , lowerCamelCase = config_and_inputs
lowerCamelCase = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class lowerCamelCase__ ( lowerCamelCase_ , lowerCamelCase_ , unittest.TestCase):
"""simple docstring"""
_A = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else ()
_A = (
{'feature-extraction': ViTMSNModel, 'image-classification': ViTMSNForImageClassification}
if is_torch_available()
else {}
)
_A = False
_A = False
_A = False
_A = False
def _a (self ):
'''simple docstring'''
lowerCamelCase = ViTMSNModelTester(self )
lowerCamelCase = ConfigTester(self , config_class=lowerCamelCase_ , has_text_modality=lowerCamelCase_ , hidden_size=37 )
def _a (self ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="ViTMSN does not use inputs_embeds" )
def _a (self ):
'''simple docstring'''
pass
def _a (self ):
'''simple docstring'''
lowerCamelCase , lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase = model_class(lowerCamelCase_ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowerCamelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCamelCase_ , nn.Linear ) )
def _a (self ):
'''simple docstring'''
lowerCamelCase , lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase = model_class(lowerCamelCase_ )
lowerCamelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase = [*signature.parameters.keys()]
lowerCamelCase = ["pixel_values"]
self.assertListEqual(arg_names[:1] , lowerCamelCase_ )
def _a (self ):
'''simple docstring'''
lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase_ )
def _a (self ):
'''simple docstring'''
lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase_ )
@slow
def _a (self ):
'''simple docstring'''
for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase = ViTMSNModel.from_pretrained(lowerCamelCase_ )
self.assertIsNotNone(lowerCamelCase_ )
def __lowercase( ):
"""simple docstring"""
lowerCamelCase = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class lowerCamelCase__ ( unittest.TestCase):
"""simple docstring"""
@cached_property
def _a (self ):
'''simple docstring'''
return ViTImageProcessor.from_pretrained("facebook/vit-msn-small" ) if is_vision_available() else None
@slow
def _a (self ):
'''simple docstring'''
torch.manual_seed(2 )
lowerCamelCase = ViTMSNForImageClassification.from_pretrained("facebook/vit-msn-small" ).to(lowerCamelCase_ )
lowerCamelCase = self.default_image_processor
lowerCamelCase = prepare_img()
lowerCamelCase = image_processor(images=lowerCamelCase_ , return_tensors="pt" ).to(lowerCamelCase_ )
# forward pass
with torch.no_grad():
lowerCamelCase = model(**lowerCamelCase_ )
# verify the logits
lowerCamelCase = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , lowerCamelCase_ )
lowerCamelCase = torch.tensor([-0.0803, -0.4454, -0.2375] ).to(lowerCamelCase_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCamelCase_ , atol=1E-4 ) ) | 623 |
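The tester above derives the transformer sequence length as (image_size // patch_size) ** 2 + 1, i.e. the patch grid plus the [CLS] token. Worked numbers with the tester defaults:

image_size, patch_size = 30, 2                   # the tester defaults above
num_patches = (image_size // patch_size) ** 2    # 15 * 15 = 225
seq_length = num_patches + 1                     # 226 with the [CLS] token
assert (num_patches, seq_length) == (225, 226)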
"""simple docstring"""
import argparse
import requests
import torch
from PIL import Image
from transformers import ViTMAEConfig, ViTMAEForPreTraining, ViTMAEImageProcessor
def __snake_case ( _lowercase ):
"""simple docstring"""
if "cls_token" in name:
UpperCamelCase = name.replace('''cls_token''' ,'''vit.embeddings.cls_token''' )
if "mask_token" in name:
UpperCamelCase = name.replace('''mask_token''' ,'''decoder.mask_token''' )
if "decoder_pos_embed" in name:
UpperCamelCase = name.replace('''decoder_pos_embed''' ,'''decoder.decoder_pos_embed''' )
if "pos_embed" in name and "decoder" not in name:
UpperCamelCase = name.replace('''pos_embed''' ,'''vit.embeddings.position_embeddings''' )
if "patch_embed.proj" in name:
UpperCamelCase = name.replace('''patch_embed.proj''' ,'''vit.embeddings.patch_embeddings.projection''' )
if "patch_embed.norm" in name:
UpperCamelCase = name.replace('''patch_embed.norm''' ,'''vit.embeddings.norm''' )
if "decoder_blocks" in name:
UpperCamelCase = name.replace('''decoder_blocks''' ,'''decoder.decoder_layers''' )
if "blocks" in name:
UpperCamelCase = name.replace('''blocks''' ,'''vit.encoder.layer''' )
if "attn.proj" in name:
UpperCamelCase = name.replace('''attn.proj''' ,'''attention.output.dense''' )
if "attn" in name:
UpperCamelCase = name.replace('''attn''' ,'''attention.self''' )
if "norm1" in name:
UpperCamelCase = name.replace('''norm1''' ,'''layernorm_before''' )
if "norm2" in name:
UpperCamelCase = name.replace('''norm2''' ,'''layernorm_after''' )
if "mlp.fc1" in name:
UpperCamelCase = name.replace('''mlp.fc1''' ,'''intermediate.dense''' )
if "mlp.fc2" in name:
UpperCamelCase = name.replace('''mlp.fc2''' ,'''output.dense''' )
if "decoder_embed" in name:
UpperCamelCase = name.replace('''decoder_embed''' ,'''decoder.decoder_embed''' )
if "decoder_norm" in name:
UpperCamelCase = name.replace('''decoder_norm''' ,'''decoder.decoder_norm''' )
if "decoder_pred" in name:
UpperCamelCase = name.replace('''decoder_pred''' ,'''decoder.decoder_pred''' )
if "norm.weight" in name and "decoder" not in name:
UpperCamelCase = name.replace('''norm.weight''' ,'''vit.layernorm.weight''' )
if "norm.bias" in name and "decoder" not in name:
UpperCamelCase = name.replace('''norm.bias''' ,'''vit.layernorm.bias''' )
return name
def __snake_case ( _lowercase ,_lowercase ):
"""simple docstring"""
for key in orig_state_dict.copy().keys():
UpperCamelCase = orig_state_dict.pop(_lowercase )
if "qkv" in key:
UpperCamelCase = key.split('''.''' )
UpperCamelCase = int(key_split[1] )
if "decoder_blocks" in key:
UpperCamelCase = config.decoder_hidden_size
UpperCamelCase = '''decoder.decoder_layers.'''
if "weight" in key:
UpperCamelCase = val[:dim, :]
UpperCamelCase = val[dim : dim * 2, :]
UpperCamelCase = val[-dim:, :]
elif "bias" in key:
UpperCamelCase = val[:dim]
UpperCamelCase = val[dim : dim * 2]
UpperCamelCase = val[-dim:]
else:
UpperCamelCase = config.hidden_size
UpperCamelCase = '''vit.encoder.layer.'''
if "weight" in key:
UpperCamelCase = val[:dim, :]
UpperCamelCase = val[dim : dim * 2, :]
UpperCamelCase = val[-dim:, :]
elif "bias" in key:
UpperCamelCase = val[:dim]
UpperCamelCase = val[dim : dim * 2]
UpperCamelCase = val[-dim:]
else:
UpperCamelCase = val
return orig_state_dict
def __snake_case ( _lowercase ,_lowercase ):
"""simple docstring"""
UpperCamelCase = ViTMAEConfig()
if "large" in checkpoint_url:
UpperCamelCase = 1024
UpperCamelCase = 4096
UpperCamelCase = 24
UpperCamelCase = 16
elif "huge" in checkpoint_url:
UpperCamelCase = 14
UpperCamelCase = 1280
UpperCamelCase = 5120
UpperCamelCase = 32
UpperCamelCase = 16
UpperCamelCase = ViTMAEForPreTraining(_lowercase )
UpperCamelCase = torch.hub.load_state_dict_from_url(_lowercase ,map_location='''cpu''' )['''model''']
UpperCamelCase = ViTMAEImageProcessor(size=config.image_size )
UpperCamelCase = convert_state_dict(_lowercase ,_lowercase )
model.load_state_dict(_lowercase )
model.eval()
UpperCamelCase = '''https://user-images.githubusercontent.com/11435359/147738734-196fd92f-9260-48d5-ba7e-bf103d29364d.jpg'''
UpperCamelCase = Image.open(requests.get(_lowercase ,stream=_lowercase ).raw )
UpperCamelCase = ViTMAEImageProcessor(size=config.image_size )
UpperCamelCase = image_processor(images=_lowercase ,return_tensors='''pt''' )
# forward pass
torch.manual_seed(2 )
UpperCamelCase = model(**_lowercase )
UpperCamelCase = outputs.logits
if "large" in checkpoint_url:
UpperCamelCase = torch.tensor(
[[-0.7309, -0.7128, -1.0169], [-1.0161, -0.9058, -1.1878], [-1.0478, -0.9411, -1.1911]] )
elif "huge" in checkpoint_url:
UpperCamelCase = torch.tensor(
[[-1.1599, -0.9199, -1.2221], [-1.1952, -0.9269, -1.2307], [-1.2143, -0.9337, -1.2262]] )
else:
UpperCamelCase = torch.tensor(
[[-0.9192, -0.8481, -1.1259], [-1.1349, -1.0034, -1.2599], [-1.1757, -1.0429, -1.2726]] )
# verify logits
assert torch.allclose(logits[0, :3, :3] ,_lowercase ,atol=1e-4 )
print(f'Saving model to {pytorch_dump_folder_path}' )
model.save_pretrained(_lowercase )
print(f'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(_lowercase )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://dl.fbaipublicfiles.com/mae/visualize/mae_visualize_vit_base.pth',
type=str,
help='URL of the checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
SCREAMING_SNAKE_CASE_ = parser.parse_args()
convert_vit_mae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path) | 34 | 0 |
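convert_state_dict above splits a fused qkv projection of shape (3 * dim, dim) into separate query/key/value slices along dim 0. A standalone sketch of that split:

import torch

def split_qkv(qkv_weight: torch.Tensor, dim: int):
    # The fused projection stacks q, k, v along dim 0: rows [0:d], [d:2d], [2d:3d].
    assert qkv_weight.shape[0] == 3 * dim
    q = qkv_weight[:dim, :]
    k = qkv_weight[dim : 2 * dim, :]
    v = qkv_weight[-dim:, :]
    return q, k, v

q, k, v = split_qkv(torch.randn(3 * 8, 8), dim=8)
assert q.shape == k.shape == v.shape == (8, 8)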
'''simple docstring'''
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class _lowerCAmelCase :
__SCREAMING_SNAKE_CASE : Dict = 42
__SCREAMING_SNAKE_CASE : Union[str, Any] = 42
class _lowerCAmelCase :
def __init__(self , lowercase ):
A_ : Tuple = [[] for _ in range(lowerCamelCase_ )]
A_ : Optional[int] = size
def __getitem__(self , lowercase ):
return iter(self._graph[vertex] )
@property
def _a (self ):
return self._size
def _a (self , lowercase , lowercase , lowercase ):
if weight not in (0, 1):
raise ValueError("""Edge weight must be either 0 or 1.""" )
if to_vertex < 0 or to_vertex >= self.size:
raise ValueError("""Vertex indexes must be in [0; size).""" )
self._graph[from_vertex].append(Edge(lowerCamelCase_ , lowerCamelCase_ ) )
def _a (self , lowercase , lowercase ):
A_ : str = deque([start_vertex] )
A_ : List[str] = [None] * self.size
A_ : Optional[Any] = 0
while queue:
A_ : List[str] = queue.popleft()
A_ : Any = distances[current_vertex]
if current_distance is None:
continue
for edge in self[current_vertex]:
A_ : Union[str, Any] = current_distance + edge.weight
A_ : Union[str, Any] = distances[edge.destination_vertex]
if (
isinstance(lowerCamelCase_ , lowerCamelCase_ )
and new_distance >= dest_vertex_distance
):
continue
A_ : List[str] = new_distance
if edge.weight == 0:
queue.appendleft(edge.destination_vertex )
else:
queue.append(edge.destination_vertex )
if distances[finish_vertex] is None:
raise ValueError("""No path from start_vertex to finish_vertex.""" )
return distances[finish_vertex]
if __name__ == "__main__":
import doctest
doctest.testmod() | 667 |
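The row above implements 0-1 BFS: weight-0 edges are pushed to the front of a deque (staying in the same BFS layer) and weight-1 edges to the back. A self-contained equivalent with a tiny worked graph, independent of the obfuscated class names:

from collections import deque

def zero_one_bfs(graph, start, finish):
    # graph: adjacency list of (to_vertex, weight) pairs, weights 0 or 1.
    dist = [None] * len(graph)
    dist[start] = 0
    queue = deque([start])
    while queue:
        u = queue.popleft()
        for v, w in graph[u]:
            nd = dist[u] + w
            if dist[v] is None or nd < dist[v]:
                dist[v] = nd
                if w == 0:
                    queue.appendleft(v)  # weight-0 edge: same "BFS layer"
                else:
                    queue.append(v)
    return dist[finish]

g = [[(1, 0), (2, 1)], [(2, 1)], []]  # 0 -0-> 1, 0 -1-> 2, 1 -1-> 2
assert zero_one_bfs(g, 0, 2) == 1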
"""simple docstring"""
import unittest
import torch
from torch import nn
from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory
def __snake_case ( ):
"""simple docstring"""
raise RuntimeError('''CUDA out of memory.''' )
class snake_case_ ( nn.Module ):
"""simple docstring"""
def __init__( self) -> Any:
super().__init__()
UpperCamelCase = nn.Linear(3 , 4)
UpperCamelCase = nn.BatchNormad(4)
UpperCamelCase = nn.Linear(4 , 5)
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Union[str, Any]:
return self.lineara(self.batchnorm(self.lineara(lowerCamelCase_)))
class snake_case_ ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase__ ( self) -> List[Any]:
UpperCamelCase = []
@find_executable_batch_size(starting_batch_size=1_2_8)
def mock_training_loop_function(lowerCamelCase_):
nonlocal batch_sizes
batch_sizes.append(lowerCamelCase_)
if batch_size != 8:
raise_fake_out_of_memory()
mock_training_loop_function()
self.assertListEqual(lowerCamelCase_ , [1_2_8, 6_4, 3_2, 1_6, 8])
def UpperCAmelCase__ ( self) -> Optional[Any]:
UpperCamelCase = []
@find_executable_batch_size(starting_batch_size=1_2_8)
def mock_training_loop_function(lowerCamelCase_ , lowerCamelCase_):
nonlocal batch_sizes
batch_sizes.append(lowerCamelCase_)
if batch_size != 8:
raise_fake_out_of_memory()
return batch_size, arga
UpperCamelCase , UpperCamelCase = mock_training_loop_function('''hello''')
self.assertListEqual(lowerCamelCase_ , [1_2_8, 6_4, 3_2, 1_6, 8])
self.assertListEqual([bs, arga] , [8, '''hello'''])
def UpperCAmelCase__ ( self) -> Tuple:
@find_executable_batch_size(starting_batch_size=0)
def mock_training_loop_function(lowerCamelCase_):
pass
with self.assertRaises(lowerCamelCase_) as cm:
mock_training_loop_function()
self.assertIn('''No executable batch size found, reached zero.''' , cm.exception.args[0])
def UpperCAmelCase__ ( self) -> List[Any]:
@find_executable_batch_size(starting_batch_size=1_6)
def mock_training_loop_function(lowerCamelCase_):
if batch_size > 0:
raise_fake_out_of_memory()
pass
with self.assertRaises(lowerCamelCase_) as cm:
mock_training_loop_function()
self.assertIn('''No executable batch size found, reached zero.''' , cm.exception.args[0])
def UpperCAmelCase__ ( self) -> Union[str, Any]:
@find_executable_batch_size(starting_batch_size=1_2_8)
def mock_training_loop_function(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_):
if batch_size != 8:
            raise_fake_out_of_memory()
with self.assertRaises(lowerCamelCase_) as cm:
mock_training_loop_function(1_2_8 , '''hello''' , '''world''')
self.assertIn('''Batch size was passed into `f`''' , cm.exception.args[0])
self.assertIn('''`f(arg1=\'hello\', arg2=\'world\')''' , cm.exception.args[0])
def UpperCAmelCase__ ( self) -> Dict:
@find_executable_batch_size(starting_batch_size=1_6)
def mock_training_loop_function(lowerCamelCase_):
raise ValueError('''Oops, we had an error!''')
with self.assertRaises(lowerCamelCase_) as cm:
mock_training_loop_function()
self.assertIn('''Oops, we had an error!''' , cm.exception.args[0])
@require_cuda
def UpperCAmelCase__ ( self) -> Optional[int]:
UpperCamelCase = torch.cuda.memory_allocated()
UpperCamelCase = ModelForTest()
model.cuda()
self.assertGreater(torch.cuda.memory_allocated() , lowerCamelCase_)
UpperCamelCase = release_memory(lowerCamelCase_)
self.assertEqual(torch.cuda.memory_allocated() , lowerCamelCase_) | 34 | 0 |
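find_executable_batch_size, exercised above, retries the wrapped function and halves the batch size on CUDA OOM until it succeeds or reaches zero. A minimal decorator sketch of those semantics, not the accelerate implementation:

import functools

def find_batch_size_sketch(start=128):
    def decorator(fn):
        @functools.wraps(fn)
        def wrapper(*args, **kwargs):
            bs = start
            while bs > 0:
                try:
                    return fn(bs, *args, **kwargs)
                except RuntimeError as e:
                    if "out of memory" not in str(e):
                        raise       # only OOM triggers the retry
                    bs //= 2        # halve and try again
            raise RuntimeError("No executable batch size found, reached zero.")
        return wrapper
    return decorator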
'''simple docstring'''
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
lowerCamelCase = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
lowerCamelCase = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
lowerCamelCase = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1000))
def _A ( _lowerCAmelCase , _lowerCAmelCase ):
"""simple docstring"""
__lowercase =len([g for position, g in enumerate(_lowercase ) if g == main_target[position]] )
return (item, float(_lowercase ))
def _A ( _lowerCAmelCase , _lowerCAmelCase ):
"""simple docstring"""
__lowercase =random.randint(0 , len(_lowercase ) - 1 )
__lowercase =parent_a[:random_slice] + parent_a[random_slice:]
__lowercase =parent_a[:random_slice] + parent_a[random_slice:]
return (child_a, child_a)
def _A ( _lowerCAmelCase , _lowerCAmelCase ):
"""simple docstring"""
__lowercase =list(_lowercase )
if random.uniform(0 , 1 ) < MUTATION_PROBABILITY:
__lowercase =random.choice(_lowercase )
return "".join(_lowercase )
def _A ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , ):
"""simple docstring"""
__lowercase =[]
# Generate more children proportionally to the fitness score.
__lowercase =int(parent_a[1] * 100 ) + 1
__lowercase =10 if child_n >= 10 else child_n
for _ in range(_lowercase ):
__lowercase =population_score[random.randint(0 , _lowercase )][0]
__lowercase , __lowercase =crossover(parent_a[0] , _lowercase )
# Append new string to the population list.
pop.append(mutate(_lowercase , _lowercase ) )
pop.append(mutate(_lowercase , _lowercase ) )
return pop
def _A ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = True ):
"""simple docstring"""
if N_POPULATION < N_SELECTED:
__lowercase =f"""{N_POPULATION} must be bigger than {N_SELECTED}"""
raise ValueError(_lowercase )
# Verify that the target contains no genes besides the ones inside genes variable.
__lowercase =sorted({c for c in target if c not in genes} )
if not_in_genes_list:
__lowercase =f"""{not_in_genes_list} is not in genes list, evolution cannot converge"""
raise ValueError(_lowercase )
# Generate random starting population.
__lowercase =[]
for _ in range(_lowercase ):
population.append(''.join([random.choice(_lowercase ) for i in range(len(_lowercase ) )] ) )
# Just some logs to know what the algorithms is doing.
__lowercase , __lowercase =0, 0
# This loop will end when we find a perfect match for our target.
while True:
generation += 1
total_population += len(_lowercase )
# Random population created. Now it's time to evaluate.
# Adding a bit of concurrency can make everything faster,
#
# import concurrent.futures
# population_score: list[tuple[str, float]] = []
# with concurrent.futures.ThreadPoolExecutor(
# max_workers=NUM_WORKERS) as executor:
# futures = {executor.submit(evaluate, item) for item in population}
# concurrent.futures.wait(futures)
# population_score = [item.result() for item in futures]
#
# but with a simple algorithm like this, it will probably be slower.
# We just need to call evaluate for every item inside the population.
__lowercase =[evaluate(_lowercase , _lowercase ) for item in population]
# Check if there is a matching evolution.
        __lowercase =sorted(_lowercase , key=lambda x : x[1] , reverse=_lowercase )
if population_score[0][0] == target:
return (generation, total_population, population_score[0][0])
# Print the best result every 10 generation.
# Just to know that the algorithm is working.
if debug and generation % 10 == 0:
print(
f"""\nGeneration: {generation}"""
f"""\nTotal Population:{total_population}"""
f"""\nBest score: {population_score[0][1]}"""
f"""\nBest string: {population_score[0][0]}""" )
# Flush the old population, keeping some of the best evolutions.
# Keeping this avoid regression of evolution.
__lowercase =population[: int(N_POPULATION / 3 )]
population.clear()
population.extend(_lowercase )
# Normalize population score to be between 0 and 1.
__lowercase =[
(item, score / len(_lowercase )) for item, score in population_score
]
# This is selection
for i in range(_lowercase ):
population.extend(select(population_score[int(_lowercase )] , _lowercase , _lowercase ) )
# Check if the population has already reached the maximum value and if so,
# break the cycle. If this check is disabled, the algorithm will take
# forever to compute large strings, but will also calculate small strings in
# a far fewer generations.
if len(_lowercase ) > N_POPULATION:
break
if __name__ == "__main__":
lowerCamelCase = (
"""This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"""
)
lowerCamelCase = list(
""" ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"""
"""nopqrstuvwxyz.,;!?+-*#@^\'èéòà€ù=)(&%$£/\\"""
)
lowerCamelCase , lowerCamelCase , lowerCamelCase = basic(target_str, genes_list)
print(
f"\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}"
)
| 474 |
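crossover above splices two parents at one random cut point, producing two complementary children. A small demonstration of the splice with generic names; only the length invariant is asserted because the cut index depends on the RNG seed:

import random

def crossover_demo(parent_a: str, parent_b: str, seed: int = 0):
    random.seed(seed)
    cut = random.randint(0, len(parent_a) - 1)
    return parent_a[:cut] + parent_b[cut:], parent_b[:cut] + parent_a[cut:]

child_a, child_b = crossover_demo("AAAA", "BBBB")
assert len(child_a) == len(child_b) == 4  # children keep the parents' length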
"""simple docstring"""
from typing import Dict
from transformers import EvalPrediction, HfArgumentParser, TrainingArguments, is_torch_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
get_torch_dist_unique_port,
require_torch_multi_gpu,
require_torch_neuroncore,
)
from transformers.training_args import ParallelMode
from transformers.utils import logging
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
from transformers import Trainer
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
def __init__( self , lowerCamelCase_ = 1_0_1) -> Tuple:
UpperCamelCase = length
def __len__( self) -> List[str]:
return self.length
def __getitem__( self , lowerCamelCase_) -> int:
return i
class snake_case_ :
"""simple docstring"""
def __call__( self , lowerCamelCase_) -> str:
return {"input_ids": torch.tensor(lowerCamelCase_), "labels": torch.tensor(lowerCamelCase_)}
class snake_case_ ( nn.Module ):
"""simple docstring"""
def __init__( self) -> List[Any]:
super().__init__()
# Add some (unused) params otherwise DDP will complain.
UpperCamelCase = nn.Linear(1_2_0 , 8_0)
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_=None) -> Any:
if labels is not None:
return torch.tensor(0.0 , device=input_ids.device), input_ids
else:
return input_ids
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
@require_torch_neuroncore
def UpperCAmelCase__ ( self) -> Tuple:
UpperCamelCase = F'--nproc_per_node=2\n --master_port={get_torch_dist_unique_port()}\n {self.test_file_dir}/test_trainer_distributed.py\n '.split()
UpperCamelCase = self.get_auto_remove_tmp_dir()
UpperCamelCase = F'--output_dir {output_dir}'.split()
UpperCamelCase = ['''torchrun'''] + distributed_args + args
execute_subprocess_async(lowerCamelCase_ , env=self.get_env())
# successful return here == success - any errors would have caused an error in the sub-call
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
@require_torch_multi_gpu
def UpperCAmelCase__ ( self) -> Union[str, Any]:
UpperCamelCase = F'--nproc_per_node={torch.cuda.device_count()}\n --master_port={get_torch_dist_unique_port()}\n {self.test_file_dir}/test_trainer_distributed.py\n '.split()
UpperCamelCase = self.get_auto_remove_tmp_dir()
UpperCamelCase = F'--output_dir {output_dir}'.split()
UpperCamelCase = ['''torchrun'''] + distributed_args + args
execute_subprocess_async(lowerCamelCase_ , env=self.get_env())
# successful return here == success - any errors would have caused an error in the sub-call
if __name__ == "__main__":
# The script below is meant to be run under torch.distributed, on a machine with multiple GPUs:
#
# PYTHONPATH="src" python -m torch.distributed.run --nproc_per_node 2 --output_dir output_dir ./tests/test_trainer_distributed.py
SCREAMING_SNAKE_CASE_ = HfArgumentParser((TrainingArguments,))
SCREAMING_SNAKE_CASE_ = parser.parse_args_into_dataclasses()[0]
logger.warning(
f'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, '
f'distributed training: {training_args.parallel_mode != ParallelMode.NOT_DISTRIBUTED}'
)
# Essentially, what we want to verify in the distributed case is that we get all samples back,
# in the right order. (this is crucial for prediction for instance)
for dataset_length in [101, 40, 7]:
SCREAMING_SNAKE_CASE_ = DummyDataset(dataset_length)
def __snake_case ( _lowercase ):
"""simple docstring"""
UpperCamelCase = list(range(len(_lowercase ) ) )
UpperCamelCase = p.predictions.tolist() == sequential and p.label_ids.tolist() == sequential
if not success and training_args.local_rank == 0:
logger.warning(
'''Predictions and/or labels do not match expected results:\n - predictions: '''
f'{p.predictions.tolist()}\n - labels: {p.label_ids.tolist()}\n - expected: {sequential}' )
return {"success": success}
SCREAMING_SNAKE_CASE_ = Trainer(
model=DummyModel(),
args=training_args,
data_collator=DummyDataCollator(),
eval_dataset=dataset,
compute_metrics=compute_metrics,
)
SCREAMING_SNAKE_CASE_ = trainer.evaluate()
logger.info(metrics)
if metrics["eval_success"] is not True:
logger.error(metrics)
exit(1)
SCREAMING_SNAKE_CASE_ = trainer.predict(dataset)
logger.info(p.metrics)
if p.metrics["test_success"] is not True:
logger.error(p.metrics)
exit(1)
SCREAMING_SNAKE_CASE_ = 2
SCREAMING_SNAKE_CASE_ = trainer.evaluate()
logger.info(metrics)
if metrics["eval_success"] is not True:
logger.error(metrics)
exit(1)
SCREAMING_SNAKE_CASE_ = trainer.predict(dataset)
logger.info(p.metrics)
if p.metrics["test_success"] is not True:
logger.error(p.metrics)
exit(1)
SCREAMING_SNAKE_CASE_ = None | 34 | 0 |
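compute_metrics above verifies that distributed prediction gathers every sample exactly once and in original order. The same check in isolation, with plain numpy arrays standing in for the EvalPrediction fields:

import numpy as np

def check_order(predictions: np.ndarray, label_ids: np.ndarray) -> bool:
    expected = list(range(len(predictions)))
    return predictions.tolist() == expected and label_ids.tolist() == expected

idx = np.arange(7)
assert check_order(idx, idx)            # in-order gather passes
assert not check_order(idx[::-1], idx)  # shuffled gather fails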
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE_:List[str] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_:Optional[Any] = {
"""tiiuae/falcon-40b""": """https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json""",
"""tiiuae/falcon-7b""": """https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json""",
}
class SCREAMING_SNAKE_CASE__ ( lowerCamelCase_ ):
'''simple docstring'''
__lowerCamelCase : int = "falcon"
__lowerCamelCase : int = ["past_key_values"]
def __init__( self, lowerCamelCase__=6_5024, lowerCamelCase__=4544, lowerCamelCase__=32, lowerCamelCase__=71, lowerCamelCase__=1e-5, lowerCamelCase__=0.02, lowerCamelCase__=True, lowerCamelCase__=0.0, lowerCamelCase__=0.0, lowerCamelCase__=None, lowerCamelCase__=False, lowerCamelCase__=False, lowerCamelCase__=True, lowerCamelCase__=True, lowerCamelCase__=False, lowerCamelCase__=11, lowerCamelCase__=11, **lowerCamelCase__, ):
A : List[str] = vocab_size
# Backward compatibility with n_embed kwarg
A : Optional[Any] = kwargs.pop("""n_embed""", lowerCamelCase_ )
A : Optional[Any] = hidden_size if n_embed is None else n_embed
A : Optional[Any] = num_hidden_layers
A : Optional[int] = num_attention_heads
A : int = layer_norm_epsilon
A : Dict = initializer_range
A : Union[str, Any] = use_cache
A : Any = hidden_dropout
A : int = attention_dropout
A : Optional[int] = bos_token_id
A : Union[str, Any] = eos_token_id
A : Optional[int] = num_attention_heads if num_kv_heads is None else num_kv_heads
A : Any = alibi
A : Dict = new_decoder_architecture
A : Any = multi_query # Ignored when new_decoder_architecture is True
A : Optional[int] = parallel_attn
A : str = bias
super().__init__(bos_token_id=lowerCamelCase_, eos_token_id=lowerCamelCase_, **lowerCamelCase_ )
@property
def _lowerCAmelCase ( self ):
return self.hidden_size // self.num_attention_heads
@property
def _lowerCAmelCase ( self ):
return not self.alibi
| 662 |
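The first property above divides hidden_size by num_attention_heads; with the falcon-7b-style defaults (4544 hidden units, 71 heads) that yields 64. A short usage sketch, assuming a transformers release that ships FalconConfig:

from transformers import FalconConfig  # requires a release with Falcon support

config = FalconConfig()  # defaults mirror the falcon-7b layout shown above
# 4544 hidden units split across 71 heads -> 64-dim attention heads
assert config.hidden_size // config.num_attention_heads == 64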
"""simple docstring"""
import argparse
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration
SCREAMING_SNAKE_CASE_ = [
# tf -> hf
('/', '.'),
('layer_', 'layers.'),
('kernel', 'weight'),
('beta', 'bias'),
('gamma', 'weight'),
('pegasus', 'model'),
]
SCREAMING_SNAKE_CASE_ = [
('.output.dense', '.fc2'),
('intermediate.LayerNorm', 'final_layer_norm'),
('intermediate.dense', 'fc1'),
]
SCREAMING_SNAKE_CASE_ = (
INIT_COMMON
+ [
('attention.self.LayerNorm', 'self_attn_layer_norm'),
('attention.output.dense', 'self_attn.out_proj'),
('attention.self', 'self_attn'),
('attention.encdec.LayerNorm', 'encoder_attn_layer_norm'),
('attention.encdec_output.dense', 'encoder_attn.out_proj'),
('attention.encdec', 'encoder_attn'),
('key', 'k_proj'),
('value', 'v_proj'),
('query', 'q_proj'),
('decoder.LayerNorm', 'decoder.layernorm_embedding'),
]
+ END_COMMON
)
SCREAMING_SNAKE_CASE_ = (
INIT_COMMON
+ [
('embeddings.word_embeddings', 'shared.weight'),
('embeddings.position_embeddings', 'embed_positions.weight'),
('attention.self.LayerNorm', 'self_attn_layer_norm'),
('attention.output.dense', 'self_attn.output'),
('attention.self', 'self_attn.self'),
('encoder.LayerNorm', 'encoder.layernorm_embedding'),
]
+ END_COMMON
)
SCREAMING_SNAKE_CASE_ = [
'encdec/key/bias',
'encdec/query/bias',
'encdec/value/bias',
'self/key/bias',
'self/query/bias',
'self/value/bias',
'encdec_output/dense/bias',
'attention/output/dense/bias',
]
def __snake_case ( _lowercase ,_lowercase ):
"""simple docstring"""
for tf_name, hf_name in patterns:
UpperCamelCase = k.replace(_lowercase ,_lowercase )
return k
def __snake_case ( _lowercase ,_lowercase ):
"""simple docstring"""
UpperCamelCase = BigBirdPegasusConfig(**_lowercase )
UpperCamelCase = BigBirdPegasusForConditionalGeneration(_lowercase )
UpperCamelCase = torch_model.state_dict()
UpperCamelCase = {}
# separating decoder weights
UpperCamelCase = {k: tf_weights[k] for k in tf_weights if k.startswith('''pegasus/decoder''' )}
UpperCamelCase = {k: tf_weights[k] for k in tf_weights if not k.startswith('''pegasus/decoder''' )}
for k, v in tqdm(decoder_weights.items() ,'''tf -> hf conversion''' ):
UpperCamelCase = [k.endswith(_lowercase ) for ending in KEYS_TO_IGNORE]
if any(_lowercase ):
continue
UpperCamelCase = DECODER_PATTERNS
UpperCamelCase = rename_state_dict_key(_lowercase ,_lowercase )
if new_k not in state_dict:
raise ValueError(f'could not find new key {new_k} in state dict. (converted from {k})' )
if any(True if i in k else False for i in ['''dense''', '''query''', '''key''', '''value'''] ):
UpperCamelCase = v.T
UpperCamelCase = torch.from_numpy(_lowercase )
assert v.shape == state_dict[new_k].shape, f'{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}'
for k, v in tqdm(remaining_weights.items() ,'''tf -> hf conversion''' ):
UpperCamelCase = [k.endswith(_lowercase ) for ending in KEYS_TO_IGNORE]
if any(_lowercase ):
continue
UpperCamelCase = REMAINING_PATTERNS
UpperCamelCase = rename_state_dict_key(_lowercase ,_lowercase )
if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings":
raise ValueError(f'could not find new key {new_k} in state dict. (converted from {k})' )
if any(True if i in k else False for i in ['''dense''', '''query''', '''key''', '''value'''] ):
UpperCamelCase = v.T
UpperCamelCase = torch.from_numpy(_lowercase )
if k != "pegasus/embeddings/position_embeddings":
assert v.shape == state_dict[new_k].shape, f'{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}'
UpperCamelCase = mapping['''model.embed_positions.weight''']
UpperCamelCase = mapping.pop('''model.embed_positions.weight''' )
UpperCamelCase , UpperCamelCase = torch_model.load_state_dict(_lowercase ,strict=_lowercase )
UpperCamelCase = [
k
for k in missing
if k
not in [
'''final_logits_bias''',
'''model.encoder.embed_tokens.weight''',
'''model.decoder.embed_tokens.weight''',
'''lm_head.weight''',
]
]
assert unexpected_missing == [], f'no matches found for the following torch keys {unexpected_missing}'
assert extra == [], f'no matches found for the following tf keys {extra}'
return torch_model
def __snake_case ( _lowercase ):
"""simple docstring"""
UpperCamelCase = tf.train.list_variables(_lowercase )
UpperCamelCase = {}
UpperCamelCase = ['''global_step''']
for name, shape in tqdm(_lowercase ,desc='''converting tf checkpoint to dict''' ):
UpperCamelCase = any(pat in name for pat in ignore_name )
if skip_key:
continue
UpperCamelCase = tf.train.load_variable(_lowercase ,_lowercase )
UpperCamelCase = array
return tf_weights
def __snake_case ( _lowercase ,_lowercase ,_lowercase ):
"""simple docstring"""
UpperCamelCase = get_tf_weights_as_numpy(_lowercase )
UpperCamelCase = convert_bigbird_pegasus(_lowercase ,_lowercase )
torch_model.save_pretrained(_lowercase )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ = argparse.ArgumentParser()
parser.add_argument('--tf_ckpt_path', type=str, help='passed to tf.train.list_variables')
parser.add_argument('--save_dir', default=None, type=str, help='Path to the output PyTorch model.')
SCREAMING_SNAKE_CASE_ = parser.parse_args()
SCREAMING_SNAKE_CASE_ = {}
convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update) | 34 | 0 |
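rename_state_dict_key above folds an ordered list of (tf, hf) substring substitutions over each checkpoint key. The mechanics with a made-up key and a trimmed pattern list (both hypothetical):

def rename_key(key: str, patterns) -> str:
    for old, new in patterns:
        key = key.replace(old, new)
    return key

patterns = [("/", "."), ("layer_", "layers."), ("kernel", "weight")]
assert rename_key("encoder/layer_0/kernel", patterns) == "encoder.layers.0.weight"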
import unittest
from dataclasses import dataclass
import pytest
from accelerate.commands.config.config_args import SageMakerConfig
from accelerate.utils import ComputeEnvironment
from accelerate.utils.launch import _convert_nargs_to_dict
@dataclass
class _a ( lowerCamelCase_ ):
"""simple docstring"""
A_ = ComputeEnvironment.AMAZON_SAGEMAKER
A_ = True
A_ = """ml.p3.2xlarge"""
A_ = """accelerate_sagemaker_execution_role"""
A_ = """hf-sm"""
A_ = """us-east-1"""
A_ = 1
A_ = """accelerate-sagemaker-1"""
A_ = """1.6"""
A_ = """4.4"""
A_ = """train.py"""
A_ = [
"""--model_name_or_path""",
"""bert""",
"""--do_train""",
"""False""",
"""--epochs""",
"""3""",
"""--learning_rate""",
"""5e-5""",
"""--max_steps""",
"""50.5""",
]
A_ = [
"""--model_name_or_path""",
"""bert""",
"""--do_train""",
"""--do_test""",
"""False""",
"""--do_predict""",
"""--epochs""",
"""3""",
"""--learning_rate""",
"""5e-5""",
"""--max_steps""",
"""50.5""",
]
class _a ( unittest.TestCase ):
"""simple docstring"""
def _UpperCAmelCase ( self ) -> List[Any]:
# If no defaults are changed, `to_kwargs` returns an empty dict.
UpperCamelCase_ = _convert_nargs_to_dict(MockLaunchConfig.success_training_script_args )
assert isinstance(converted_args['model_name_or_path'] , lowerCamelCase_ )
assert isinstance(converted_args['do_train'] , lowerCamelCase_ )
assert isinstance(converted_args['epochs'] , lowerCamelCase_ )
assert isinstance(converted_args['learning_rate'] , lowerCamelCase_ )
assert isinstance(converted_args['max_steps'] , lowerCamelCase_ )
with pytest.raises(lowerCamelCase_ ):
_convert_nargs_to_dict(MockLaunchConfig.fail_training_script_args )
| 23 |
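_convert_nargs_to_dict, exercised above, turns a flat argv-style list into typed keyword arguments: bare flags become booleans and values are parsed to int/float where possible. A hedged sketch of that behavior, not accelerate's actual implementation:

def nargs_to_dict_sketch(args):
    out, key = {}, None
    for token in args:
        if token.startswith("--"):
            if key is not None:
                out[key] = True       # previous flag carried no value
            key = token[2:]
        else:
            for cast in (int, float):  # try progressively looser parses
                try:
                    token = cast(token)
                    break
                except ValueError:
                    pass
            if token == "False":
                token = False
            out[key], key = token, None
    if key is not None:
        out[key] = True
    return out

parsed = nargs_to_dict_sketch(["--do_train", "--epochs", "3", "--lr", "5e-5"])
assert parsed == {"do_train": True, "epochs": 3, "lr": 5e-5}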
"""simple docstring"""
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
def __snake_case ( _lowercase ):
"""simple docstring"""
UpperCamelCase , UpperCamelCase = analyze_text(_lowercase )
UpperCamelCase = list(''' ''' + ascii_lowercase )
# what is our total sum of probabilities.
UpperCamelCase = sum(single_char_strings.values() )
# one length string
UpperCamelCase = 0
# for each alpha we go in our dict and if it is in it we calculate entropy
for ch in my_alphas:
if ch in single_char_strings:
UpperCamelCase = single_char_strings[ch]
UpperCamelCase = my_str / all_sum
my_fir_sum += prob * math.loga(_lowercase ) # entropy formula.
# print entropy
print(f'{round(-1 * my_fir_sum ):.1f}' )
# two len string
UpperCamelCase = sum(two_char_strings.values() )
UpperCamelCase = 0
# for each alpha (two in size) calculate entropy.
for cha in my_alphas:
for cha in my_alphas:
UpperCamelCase = cha + cha
if sequence in two_char_strings:
UpperCamelCase = two_char_strings[sequence]
UpperCamelCase = int(_lowercase ) / all_sum
my_sec_sum += prob * math.loga(_lowercase )
# print second entropy
print(f'{round(-1 * my_sec_sum ):.1f}' )
# print the difference between them
print(f'{round((-1 * my_sec_sum) - (-1 * my_fir_sum) ):.1f}' )
def __snake_case ( _lowercase ):
"""simple docstring"""
UpperCamelCase = Counter() # type: ignore
UpperCamelCase = Counter() # type: ignore
single_char_strings[text[-1]] += 1
# first case when we have space at start.
two_char_strings[" " + text[0]] += 1
for i in range(0 ,len(_lowercase ) - 1 ):
single_char_strings[text[i]] += 1
two_char_strings[text[i : i + 2]] += 1
return single_char_strings, two_char_strings
def __snake_case ( ):
"""simple docstring"""
import doctest
doctest.testmod()
# text = (
# "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
# "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
# "jointure saw horrible. He private he on be imagine suppose. Fertile "
# "beloved evident through no service elderly is. Blind there if every no so "
# "at. Own neglected you preferred way sincerity delivered his attempted. To "
# "of message cottage windows do besides against uncivil. Delightful "
# "unreserved impossible few estimating men favourable see entreaties. She "
# "propriety immediate was improving. He or entrance humoured likewise "
# "moderate. Much nor game son say feel. Fat make met can must form into "
# "gate. Me we offending prevailed discovery. "
# )
# calculate_prob(text)
if __name__ == "__main__":
main() | 34 | 0 |
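# A minimal, de-obfuscated sketch of the single-character entropy computed above,
# using the standard Shannon formula H = -sum(p * log2(p)); names are illustrative.
import math
from collections import Counter

def first_order_entropy(text: str) -> float:
    counts = Counter(text)
    total = sum(counts.values())
    return -sum((n / total) * math.log2(n / total) for n in counts.values())

assert round(first_order_entropy("aabb"), 1) == 1.0  # two equally likely symbols -> 1 bit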
import copy
from collections import OrderedDict
from typing import Dict, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
lowercase : str = logging.get_logger(__name__)
lowercase : Union[str, Any] = {
'facebook/detr-resnet-50': 'https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json',
# See all DETR models at https://huggingface.co/models?filter=detr
}
class lowerCamelCase__ ( lowerCamelCase_):
'''simple docstring'''
_A = 'detr'
_A = ['past_key_values']
_A = {
'hidden_size': 'd_model',
'num_attention_heads': 'encoder_attention_heads',
}
def __init__( self :Dict , a :Tuple=True , a :str=None , a :Optional[int]=3 , a :Union[str, Any]=1_0_0 , a :List[str]=6 , a :Dict=2_0_4_8 , a :Optional[int]=8 , a :str=6 , a :Optional[Any]=2_0_4_8 , a :List[Any]=8 , a :Any=0.0 , a :Optional[Any]=0.0 , a :Dict=True , a :str="relu" , a :str=2_5_6 , a :Optional[int]=0.1 , a :List[str]=0.0 , a :str=0.0 , a :List[str]=0.02 , a :Union[str, Any]=1.0 , a :Union[str, Any]=False , a :List[Any]="sine" , a :int="resnet50" , a :int=True , a :Union[str, Any]=False , a :Dict=1 , a :List[str]=5 , a :int=2 , a :Any=1 , a :Optional[Any]=1 , a :int=5 , a :List[Any]=2 , a :List[Any]=0.1 , **a :Optional[int] , ) -> List[Any]:
if backbone_config is not None and use_timm_backbone:
raise ValueError("You can\'t specify both `backbone_config` and `use_timm_backbone`." )
if not use_timm_backbone:
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
__UpperCamelCase : Any = CONFIG_MAPPING["resnet"](out_features=["stage4"] )
elif isinstance(lowerCamelCase_ , lowerCamelCase_ ):
__UpperCamelCase : Any = backbone_config.get("model_type" )
__UpperCamelCase : Any = CONFIG_MAPPING[backbone_model_type]
__UpperCamelCase : Any = config_class.from_dict(lowerCamelCase_ )
# set timm attributes to None
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase : List[str] = None, None, None
__UpperCamelCase : Union[str, Any] = use_timm_backbone
__UpperCamelCase : Optional[int] = backbone_config
__UpperCamelCase : int = num_channels
__UpperCamelCase : Dict = num_queries
__UpperCamelCase : int = d_model
__UpperCamelCase : Optional[int] = encoder_ffn_dim
__UpperCamelCase : str = encoder_layers
__UpperCamelCase : int = encoder_attention_heads
__UpperCamelCase : List[Any] = decoder_ffn_dim
__UpperCamelCase : int = decoder_layers
__UpperCamelCase : Any = decoder_attention_heads
__UpperCamelCase : Tuple = dropout
__UpperCamelCase : List[Any] = attention_dropout
__UpperCamelCase : str = activation_dropout
__UpperCamelCase : Dict = activation_function
__UpperCamelCase : str = init_std
__UpperCamelCase : int = init_xavier_std
__UpperCamelCase : Optional[int] = encoder_layerdrop
__UpperCamelCase : Any = decoder_layerdrop
__UpperCamelCase : Union[str, Any] = encoder_layers
__UpperCamelCase : List[Any] = auxiliary_loss
__UpperCamelCase : str = position_embedding_type
__UpperCamelCase : str = backbone
__UpperCamelCase : Any = use_pretrained_backbone
__UpperCamelCase : List[str] = dilation
# Hungarian matcher
__UpperCamelCase : int = class_cost
__UpperCamelCase : Optional[int] = bbox_cost
__UpperCamelCase : Optional[int] = giou_cost
# Loss coefficients
__UpperCamelCase : List[Any] = mask_loss_coefficient
__UpperCamelCase : Optional[int] = dice_loss_coefficient
__UpperCamelCase : Tuple = bbox_loss_coefficient
__UpperCamelCase : Optional[Any] = giou_loss_coefficient
__UpperCamelCase : Any = eos_coefficient
super().__init__(is_encoder_decoder=lowerCamelCase_ , **lowerCamelCase_ )
@property
def _lowerCamelCase ( self :List[Any] ) -> int:
return self.encoder_attention_heads
@property
def _lowerCamelCase ( self :Union[str, Any] ) -> int:
return self.d_model
@classmethod
def _lowerCamelCase ( cls :Union[str, Any] , a :int , **a :Dict ) -> List[str]:
return cls(backbone_config=lowerCamelCase_ , **lowerCamelCase_ )
def _lowerCamelCase ( self :str ) -> Dict[str, any]:
__UpperCamelCase : List[str] = copy.deepcopy(self.__dict__ )
if output["backbone_config"] is not None:
__UpperCamelCase : Union[str, Any] = self.backbone_config.to_dict()
__UpperCamelCase : Union[str, Any] = self.__class__.model_type
return output
class lowerCamelCase__ ( lowerCamelCase_):
'''simple docstring'''
_A = version.parse('1.11')
@property
def _lowerCamelCase ( self :Optional[Any] ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
("pixel_mask", {0: "batch"}),
] )
@property
def _lowerCamelCase ( self :Optional[Any] ) -> float:
return 1E-5
@property
def _lowerCamelCase ( self :Optional[Any] ) -> int:
return 1_2 | 557 |
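# Illustrative usage of the DETR configuration above, assuming the standard
# `transformers` API; the attribute_map makes generic names resolve to the
# DETR-specific fields.
from transformers import DetrConfig

cfg = DetrConfig(d_model=128, encoder_attention_heads=4)
assert cfg.hidden_size == 128        # mapped to d_model
assert cfg.num_attention_heads == 4  # mapped to encoder_attention_heads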
"""simple docstring"""
import unittest
import numpy as np
from transformers import DistilBertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.distilbert.modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
)
class snake_case_ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , lowerCamelCase_ , lowerCamelCase_=1_3 , lowerCamelCase_=7 , lowerCamelCase_=True , lowerCamelCase_=True , lowerCamelCase_=True , lowerCamelCase_=True , lowerCamelCase_=9_9 , lowerCamelCase_=3_2 , lowerCamelCase_=5 , lowerCamelCase_=4 , lowerCamelCase_=3_7 , lowerCamelCase_="gelu" , lowerCamelCase_=0.1 , lowerCamelCase_=0.1 , lowerCamelCase_=5_1_2 , lowerCamelCase_=1_6 , lowerCamelCase_=2 , lowerCamelCase_=0.02 , lowerCamelCase_=4 , ) -> Any:
UpperCamelCase = parent
UpperCamelCase = batch_size
UpperCamelCase = seq_length
UpperCamelCase = is_training
UpperCamelCase = use_attention_mask
UpperCamelCase = use_token_type_ids
UpperCamelCase = use_labels
UpperCamelCase = vocab_size
UpperCamelCase = hidden_size
UpperCamelCase = num_hidden_layers
UpperCamelCase = num_attention_heads
UpperCamelCase = intermediate_size
UpperCamelCase = hidden_act
UpperCamelCase = hidden_dropout_prob
UpperCamelCase = attention_probs_dropout_prob
UpperCamelCase = max_position_embeddings
UpperCamelCase = type_vocab_size
UpperCamelCase = type_sequence_label_size
UpperCamelCase = initializer_range
UpperCamelCase = num_choices
def UpperCAmelCase__ ( self) -> Optional[Any]:
UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
UpperCamelCase = None
if self.use_attention_mask:
UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length])
UpperCamelCase = DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , tie_weights_=lowerCamelCase_ , )
return config, input_ids, attention_mask
def UpperCAmelCase__ ( self) -> str:
UpperCamelCase = self.prepare_config_and_inputs()
UpperCamelCase , UpperCamelCase , UpperCamelCase = config_and_inputs
UpperCamelCase = {'''input_ids''': input_ids, '''attention_mask''': attention_mask}
return config, inputs_dict
@require_flax
class snake_case_ ( lowerCamelCase_ , unittest.TestCase ):
"""simple docstring"""
A_ = (
(
FlaxDistilBertModel,
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
)
if is_flax_available()
else ()
)
def UpperCAmelCase__ ( self) -> List[str]:
UpperCamelCase = FlaxDistilBertModelTester(self)
@slow
def UpperCAmelCase__ ( self) -> Dict:
for model_class_name in self.all_model_classes:
UpperCamelCase = model_class_name.from_pretrained('''distilbert-base-uncased''')
UpperCamelCase = model(np.ones((1, 1)))
self.assertIsNotNone(lowerCamelCase_)
@require_flax
class snake_case_ ( unittest.TestCase ):
"""simple docstring"""
@slow
def UpperCAmelCase__ ( self) -> Optional[int]:
UpperCamelCase = FlaxDistilBertModel.from_pretrained('''distilbert-base-uncased''')
UpperCamelCase = np.array([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]])
UpperCamelCase = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
UpperCamelCase = model(lowerCamelCase_ , attention_mask=lowerCamelCase_)[0]
UpperCamelCase = (1, 1_1, 7_6_8)
self.assertEqual(output.shape , lowerCamelCase_)
UpperCamelCase = np.array([[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]])
self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , lowerCamelCase_ , atol=1e-4)) | 34 | 0 |
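# The slow test above uses the standard "expected slice" pattern: run the model
# on a fixed input and compare a small corner of the logits against frozen
# reference values. A generic, standalone sketch of that check:
import numpy as np

def slice_matches(output: np.ndarray, expected: np.ndarray, atol: float = 1e-4) -> bool:
    return np.allclose(output[:, 1:4, 1:4], expected, atol=atol)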
import copy
import re
class UpperCamelCase :
lowerCAmelCase : int = """hp"""
lowerCAmelCase : List[str] = {}
lowerCAmelCase : Tuple = None
@classmethod
def __A ( cls , UpperCAmelCase__ , UpperCAmelCase__ ):
A__ = prefix
A__ = defaults
cls.build_naming_info()
@staticmethod
def __A ( UpperCAmelCase__ , UpperCAmelCase__ ):
if len(lowerCamelCase_ ) == 0:
return ""
A__ = None
if any(char.isdigit() for char in word ):
raise Exception(F"""Parameters should not contain numbers: \'{word}\' contains a number""" )
if word in info["short_word"]:
return info["short_word"][word]
for prefix_len in range(1 , len(lowerCamelCase_ ) + 1 ):
A__ = word[:prefix_len]
if prefix in info["reverse_short_word"]:
continue
else:
A__ = prefix
break
if short_word is None:
# Paranoid fallback
def int_to_alphabetic(UpperCAmelCase__ ):
A__ = ""
while integer != 0:
A__ = chr(ord("A" ) + integer % 10 ) + s
integer //= 10
return s
A__ = 0
while True:
A__ = word + "#" + int_to_alphabetic(lowerCamelCase_ )
if sword in info["reverse_short_word"]:
continue
else:
A__ = sword
break
A__ = short_word
A__ = word
return short_word
@staticmethod
def __A ( UpperCAmelCase__ , UpperCAmelCase__ ):
A__ = param_name.split("_" )
A__ = [TrialShortNamer.shortname_for_word(lowerCamelCase_ , lowerCamelCase_ ) for word in words]
        # We try to create a separator-less short name, but if there is a collision we fall back
        # to a separated short name
A__ = ["", "_"]
for separator in separators:
A__ = separator.join(lowerCamelCase_ )
if shortname not in info["reverse_short_param"]:
A__ = shortname
A__ = param_name
return shortname
return param_name
@staticmethod
def __A ( UpperCAmelCase__ , UpperCAmelCase__ ):
A__ = TrialShortNamer.shortname_for_key(lowerCamelCase_ , lowerCamelCase_ )
A__ = short_name
A__ = param_name
@classmethod
def __A ( cls ):
if cls.NAMING_INFO is not None:
return
A__ = {
"short_word": {},
"reverse_short_word": {},
"short_param": {},
"reverse_short_param": {},
}
A__ = list(cls.DEFAULTS.keys() )
for k in field_keys:
cls.add_new_param_name(lowerCamelCase_ , lowerCamelCase_ )
A__ = info
@classmethod
def __A ( cls , UpperCAmelCase__ ):
cls.build_naming_info()
assert cls.PREFIX is not None
A__ = [copy.copy(cls.PREFIX )]
for k, v in params.items():
if k not in cls.DEFAULTS:
raise Exception(F"""You should provide a default value for the param name {k} with value {v}""" )
if v == cls.DEFAULTS[k]:
# The default value is not added to the name
continue
A__ = cls.NAMING_INFO["short_param"][k]
if isinstance(lowerCamelCase_ , lowerCamelCase_ ):
A__ = 1 if v else 0
A__ = "" if isinstance(lowerCamelCase_ , (int, float) ) else "-"
A__ = F"""{key}{sep}{v}"""
name.append(lowerCamelCase_ )
return "_".join(lowerCamelCase_ )
@classmethod
def __A ( cls , UpperCAmelCase__ ):
A__ = repr[len(cls.PREFIX ) + 1 :]
if repr == "":
A__ = []
else:
A__ = repr.split("_" )
A__ = {}
for value in values:
if "-" in value:
A__ , A__ = value.split("-" )
else:
A__ = re.sub("[0-9.]" , "" , lowerCamelCase_ )
A__ = float(re.sub("[^0-9.]" , "" , lowerCamelCase_ ) )
A__ = cls.NAMING_INFO["reverse_short_param"][p_k]
A__ = p_v
for k in cls.DEFAULTS:
if k not in parameters:
A__ = cls.DEFAULTS[k]
return parameters
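# A minimal, runnable sketch of the core idea above: assign each parameter word
# the shortest prefix not already taken (the full class also falls back to
# numbered suffixes and joins multi-word parameter names).
def shortest_unique_prefixes(words):
    taken = {}
    for word in words:
        for n in range(1, len(word) + 1):
            if word[:n] not in taken:
                taken[word[:n]] = word
                break
    return {w: p for p, w in taken.items()}

assert shortest_unique_prefixes(["learning", "length"]) == {"learning": "l", "length": "le"}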
| 491 |
"""simple docstring"""
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
@add_end_docstrings(lowerCamelCase_ )
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
def __init__( self , **lowerCamelCase_) -> Tuple:
super().__init__(**lowerCamelCase_)
requires_backends(self , '''vision''')
self.check_model_type(
TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if self.framework == '''tf'''
else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING)
def __call__( self , lowerCamelCase_ , **lowerCamelCase_) -> Optional[int]:
return super().__call__(lowerCamelCase_ , **lowerCamelCase_)
def UpperCAmelCase__ ( self , **lowerCamelCase_) -> Any:
UpperCamelCase = {}
if "candidate_labels" in kwargs:
UpperCamelCase = kwargs['''candidate_labels''']
if "hypothesis_template" in kwargs:
UpperCamelCase = kwargs['''hypothesis_template''']
return preprocess_params, {}, {}
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_=None , lowerCamelCase_="This is a photo of {}.") -> Union[str, Any]:
UpperCamelCase = load_image(lowerCamelCase_)
UpperCamelCase = self.image_processor(images=[image] , return_tensors=self.framework)
UpperCamelCase = candidate_labels
UpperCamelCase = [hypothesis_template.format(lowerCamelCase_) for x in candidate_labels]
UpperCamelCase = self.tokenizer(lowerCamelCase_ , return_tensors=self.framework , padding=lowerCamelCase_)
UpperCamelCase = [text_inputs]
return inputs
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Any:
UpperCamelCase = model_inputs.pop('''candidate_labels''')
UpperCamelCase = model_inputs.pop('''text_inputs''')
if isinstance(text_inputs[0] , lowerCamelCase_):
UpperCamelCase = text_inputs[0]
else:
# Batching case.
UpperCamelCase = text_inputs[0][0]
UpperCamelCase = self.model(**lowerCamelCase_ , **lowerCamelCase_)
UpperCamelCase = {
'''candidate_labels''': candidate_labels,
'''logits''': outputs.logits_per_image,
}
return model_outputs
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Any:
UpperCamelCase = model_outputs.pop('''candidate_labels''')
UpperCamelCase = model_outputs['''logits'''][0]
if self.framework == "pt":
UpperCamelCase = logits.softmax(dim=-1).squeeze(-1)
UpperCamelCase = probs.tolist()
if not isinstance(lowerCamelCase_ , lowerCamelCase_):
UpperCamelCase = [scores]
elif self.framework == "tf":
UpperCamelCase = stable_softmax(lowerCamelCase_ , axis=-1)
UpperCamelCase = probs.numpy().tolist()
else:
raise ValueError(F'Unsupported framework: {self.framework}')
UpperCamelCase = [
{'''score''': score, '''label''': candidate_label}
for score, candidate_label in sorted(zip(lowerCamelCase_ , lowerCamelCase_) , key=lambda lowerCamelCase_: -x[0])
]
return result | 34 | 0 |
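# Typical high-level invocation of this pipeline (standard transformers usage;
# the checkpoint name and image path are only examples):
from transformers import pipeline

classifier = pipeline("zero-shot-image-classification", model="openai/clip-vit-base-patch32")
predictions = classifier(
    "cat.png",  # a local path, URL, or PIL.Image
    candidate_labels=["cat", "dog"],
    hypothesis_template="This is a photo of {}.",
)
# -> a list of {"score": ..., "label": ...} dicts sorted by descending score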
import json
import os
import tempfile
from transformers.testing_utils import check_json_file_has_correct_format
class _lowercase :
"""simple docstring"""
lowerCAmelCase__ = None
def _UpperCAmelCase ( self ):
'''simple docstring'''
_lowercase = self.feature_extraction_class(**self.feat_extract_dict )
_lowercase = json.loads(feat_extract.to_json_string() )
for key, value in self.feat_extract_dict.items():
self.assertEqual(obj[key] , lowerCamelCase_ )
def _UpperCAmelCase ( self ):
'''simple docstring'''
_lowercase = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_lowercase = os.path.join(lowerCamelCase_ , """feat_extract.json""" )
feat_extract_first.to_json_file(lowerCamelCase_ )
_lowercase = self.feature_extraction_class.from_json_file(lowerCamelCase_ )
self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() )
def _UpperCAmelCase ( self ):
'''simple docstring'''
_lowercase = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_lowercase = feat_extract_first.save_pretrained(lowerCamelCase_ )[0]
check_json_file_has_correct_format(lowerCamelCase_ )
_lowercase = self.feature_extraction_class.from_pretrained(lowerCamelCase_ )
self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() )
def _UpperCAmelCase ( self ):
'''simple docstring'''
_lowercase = self.feature_extraction_class()
self.assertIsNotNone(lowerCamelCase_ )
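# The save/load tests above all reduce to a JSON round trip; the invariant they
# protect, in isolation (illustrative):
import json

def survives_json_round_trip(d: dict) -> bool:
    return json.loads(json.dumps(d)) == d

assert survives_json_round_trip({"feature_size": 80, "sampling_rate": 16000})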
| 398 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class snake_case_ ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , unittest.TestCase ):
"""simple docstring"""
A_ = StableDiffusionInpaintPipeline
A_ = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
A_ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
A_ = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
A_ = frozenset([] )
def UpperCAmelCase__ ( self) -> List[Any]:
torch.manual_seed(0)
UpperCamelCase = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=9 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=3_2 , attention_head_dim=(2, 4) , use_linear_projection=lowerCamelCase_ , )
UpperCamelCase = PNDMScheduler(skip_prk_steps=lowerCamelCase_)
torch.manual_seed(0)
UpperCamelCase = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , sample_size=1_2_8 , )
torch.manual_seed(0)
UpperCamelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , hidden_act='''gelu''' , projection_dim=5_1_2 , )
UpperCamelCase = CLIPTextModel(lowerCamelCase_)
UpperCamelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''')
UpperCamelCase = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_=0) -> Dict:
# TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched
UpperCamelCase = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(lowerCamelCase_)).to(lowerCamelCase_)
UpperCamelCase = image.cpu().permute(0 , 2 , 3 , 1)[0]
UpperCamelCase = Image.fromarray(np.uinta(lowerCamelCase_)).convert('''RGB''').resize((6_4, 6_4))
UpperCamelCase = Image.fromarray(np.uinta(image + 4)).convert('''RGB''').resize((6_4, 6_4))
if str(lowerCamelCase_).startswith('''mps'''):
UpperCamelCase = torch.manual_seed(lowerCamelCase_)
else:
UpperCamelCase = torch.Generator(device=lowerCamelCase_).manual_seed(lowerCamelCase_)
UpperCamelCase = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': init_image,
'''mask_image''': mask_image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
}
return inputs
def UpperCAmelCase__ ( self) -> Optional[Any]:
UpperCamelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator
UpperCamelCase = self.get_dummy_components()
UpperCamelCase = StableDiffusionInpaintPipeline(**lowerCamelCase_)
UpperCamelCase = sd_pipe.to(lowerCamelCase_)
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_)
UpperCamelCase = self.get_dummy_inputs(lowerCamelCase_)
UpperCamelCase = sd_pipe(**lowerCamelCase_).images
UpperCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
UpperCamelCase = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
def UpperCAmelCase__ ( self) -> Union[str, Any]:
super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class snake_case_ ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase__ ( self) -> int:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase__ ( self) -> List[Any]:
UpperCamelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-inpaint/init_image.png''')
UpperCamelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''')
UpperCamelCase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'''
'''/yellow_cat_sitting_on_a_park_bench.npy''')
UpperCamelCase = '''stabilityai/stable-diffusion-2-inpainting'''
UpperCamelCase = StableDiffusionInpaintPipeline.from_pretrained(lowerCamelCase_ , safety_checker=lowerCamelCase_)
pipe.to(lowerCamelCase_)
pipe.set_progress_bar_config(disable=lowerCamelCase_)
pipe.enable_attention_slicing()
UpperCamelCase = '''Face of a yellow cat, high resolution, sitting on a park bench'''
UpperCamelCase = torch.manual_seed(0)
UpperCamelCase = pipe(
prompt=lowerCamelCase_ , image=lowerCamelCase_ , mask_image=lowerCamelCase_ , generator=lowerCamelCase_ , output_type='''np''' , )
UpperCamelCase = output.images[0]
assert image.shape == (5_1_2, 5_1_2, 3)
assert np.abs(expected_image - image).max() < 9e-3
def UpperCAmelCase__ ( self) -> Optional[Any]:
UpperCamelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-inpaint/init_image.png''')
UpperCamelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''')
UpperCamelCase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'''
'''/yellow_cat_sitting_on_a_park_bench_fp16.npy''')
UpperCamelCase = '''stabilityai/stable-diffusion-2-inpainting'''
UpperCamelCase = StableDiffusionInpaintPipeline.from_pretrained(
lowerCamelCase_ , torch_dtype=torch.floataa , safety_checker=lowerCamelCase_ , )
pipe.to(lowerCamelCase_)
pipe.set_progress_bar_config(disable=lowerCamelCase_)
pipe.enable_attention_slicing()
UpperCamelCase = '''Face of a yellow cat, high resolution, sitting on a park bench'''
UpperCamelCase = torch.manual_seed(0)
UpperCamelCase = pipe(
prompt=lowerCamelCase_ , image=lowerCamelCase_ , mask_image=lowerCamelCase_ , generator=lowerCamelCase_ , output_type='''np''' , )
UpperCamelCase = output.images[0]
assert image.shape == (5_1_2, 5_1_2, 3)
assert np.abs(expected_image - image).max() < 5e-1
def UpperCAmelCase__ ( self) -> List[str]:
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
UpperCamelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-inpaint/init_image.png''')
UpperCamelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''')
UpperCamelCase = '''stabilityai/stable-diffusion-2-inpainting'''
UpperCamelCase = PNDMScheduler.from_pretrained(lowerCamelCase_ , subfolder='''scheduler''')
UpperCamelCase = StableDiffusionInpaintPipeline.from_pretrained(
lowerCamelCase_ , safety_checker=lowerCamelCase_ , scheduler=lowerCamelCase_ , torch_dtype=torch.floataa , )
pipe.to(lowerCamelCase_)
pipe.set_progress_bar_config(disable=lowerCamelCase_)
pipe.enable_attention_slicing(1)
pipe.enable_sequential_cpu_offload()
UpperCamelCase = '''Face of a yellow cat, high resolution, sitting on a park bench'''
UpperCamelCase = torch.manual_seed(0)
UpperCamelCase = pipe(
prompt=lowerCamelCase_ , image=lowerCamelCase_ , mask_image=lowerCamelCase_ , generator=lowerCamelCase_ , num_inference_steps=2 , output_type='''np''' , )
UpperCamelCase = torch.cuda.max_memory_allocated()
# make sure that less than 2.65 GB is allocated
assert mem_bytes < 2.65 * 1_0**9 | 34 | 0 |
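# The end-user call the slow tests above exercise, reusing the same checkpoint
# and test assets (standard diffusers usage; requires a CUDA GPU):
import torch
from diffusers import StableDiffusionInpaintPipeline
from diffusers.utils import load_image

init_image = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/init_image.png"
)
mask_image = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
)
pipe = StableDiffusionInpaintPipeline.from_pretrained(
    "stabilityai/stable-diffusion-2-inpainting", torch_dtype=torch.float16
).to("cuda")
image = pipe(
    prompt="Face of a yellow cat, high resolution, sitting on a park bench",
    image=init_image,
    mask_image=mask_image,
).images[0]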
'''simple docstring'''
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPTaConfig
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
UpperCAmelCase__ :Optional[Any] = {
"""return_dict""": False,
"""output_hidden_states""": True,
"""output_attentions""": True,
"""torchscript""": True,
"""torch_dtype""": """float16""",
"""use_bfloat16""": True,
"""tf_legacy_loss""": True,
"""pruned_heads""": {"""a""": 1},
"""tie_word_embeddings""": False,
"""is_decoder""": True,
"""cross_attention_hidden_size""": 128,
"""add_cross_attention""": True,
"""tie_encoder_decoder""": True,
"""max_length""": 50,
"""min_length""": 3,
"""do_sample""": True,
"""early_stopping""": True,
"""num_beams""": 3,
"""num_beam_groups""": 3,
"""diversity_penalty""": 0.5,
"""temperature""": 2.0,
"""top_k""": 10,
"""top_p""": 0.7,
"""typical_p""": 0.2,
"""repetition_penalty""": 0.8,
"""length_penalty""": 0.8,
"""no_repeat_ngram_size""": 5,
"""encoder_no_repeat_ngram_size""": 5,
"""bad_words_ids""": [1, 2, 3],
"""num_return_sequences""": 3,
"""chunk_size_feed_forward""": 5,
"""output_scores""": True,
"""return_dict_in_generate""": True,
"""forced_bos_token_id""": 2,
"""forced_eos_token_id""": 3,
"""remove_invalid_values""": True,
"""architectures""": ["""BertModel"""],
"""finetuning_task""": """translation""",
"""id2label""": {0: """label"""},
"""label2id""": {"""label""": """0"""},
"""tokenizer_class""": """BertTokenizerFast""",
"""prefix""": """prefix""",
"""bos_token_id""": 6,
"""pad_token_id""": 7,
"""eos_token_id""": 8,
"""sep_token_id""": 9,
"""decoder_start_token_id""": 10,
"""exponential_decay_length_penalty""": (5, 1.01),
"""suppress_tokens""": [0, 1],
"""begin_suppress_tokens""": 2,
"""task_specific_params""": {"""translation""": """some_params"""},
"""problem_type""": """regression""",
}
@is_staging_test
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@classmethod
def a_ ( cls : Union[str, Any] ):
"""simple docstring"""
__lowerCamelCase : Optional[Any] = TOKEN
HfFolder.save_token(lowerCamelCase_ )
@classmethod
def a_ ( cls : Optional[int] ):
"""simple docstring"""
try:
delete_repo(token=cls._token , repo_id="""test-config""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""valid_org/test-config-org""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""test-dynamic-config""" )
except HTTPError:
pass
def a_ ( self : List[Any] ):
"""simple docstring"""
__lowerCamelCase : str = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
config.push_to_hub("""test-config""" , use_auth_token=self._token )
__lowerCamelCase : Tuple = BertConfig.from_pretrained(f"{USER}/test-config" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCamelCase_ , getattr(lowerCamelCase_ , lowerCamelCase_ ) )
# Reset repo
delete_repo(token=self._token , repo_id="""test-config""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(lowerCamelCase_ , repo_id="""test-config""" , push_to_hub=lowerCamelCase_ , use_auth_token=self._token )
__lowerCamelCase : List[str] = BertConfig.from_pretrained(f"{USER}/test-config" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCamelCase_ , getattr(lowerCamelCase_ , lowerCamelCase_ ) )
def a_ ( self : int ):
"""simple docstring"""
__lowerCamelCase : Union[str, Any] = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
config.push_to_hub("""valid_org/test-config-org""" , use_auth_token=self._token )
__lowerCamelCase : Optional[Any] = BertConfig.from_pretrained("""valid_org/test-config-org""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCamelCase_ , getattr(lowerCamelCase_ , lowerCamelCase_ ) )
# Reset repo
delete_repo(token=self._token , repo_id="""valid_org/test-config-org""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
lowerCamelCase_ , repo_id="""valid_org/test-config-org""" , push_to_hub=lowerCamelCase_ , use_auth_token=self._token )
__lowerCamelCase : Union[str, Any] = BertConfig.from_pretrained("""valid_org/test-config-org""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCamelCase_ , getattr(lowerCamelCase_ , lowerCamelCase_ ) )
def a_ ( self : Tuple ):
"""simple docstring"""
CustomConfig.register_for_auto_class()
__lowerCamelCase : Optional[Any] = CustomConfig(attribute=42 )
config.push_to_hub("""test-dynamic-config""" , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(config.auto_map , {"""AutoConfig""": """custom_configuration.CustomConfig"""} )
__lowerCamelCase : Any = AutoConfig.from_pretrained(f"{USER}/test-dynamic-config" , trust_remote_code=lowerCamelCase_ )
# Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
self.assertEqual(new_config.__class__.__name__ , """CustomConfig""" )
self.assertEqual(new_config.attribute , 42 )
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def a_ ( self : List[str] ):
"""simple docstring"""
__lowerCamelCase : Union[str, Any] = GPTaConfig()
# attempt to modify each of int/float/bool/str config records and verify they were updated
__lowerCamelCase : int = c.n_embd + 1 # int
__lowerCamelCase : Union[str, Any] = c.resid_pdrop + 1.0 # float
__lowerCamelCase : Optional[Any] = not c.scale_attn_weights # bool
__lowerCamelCase : str = c.summary_type + """foo""" # str
c.update_from_string(
f"n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}" )
self.assertEqual(lowerCamelCase_ , c.n_embd , """mismatch for key: n_embd""" )
self.assertEqual(lowerCamelCase_ , c.resid_pdrop , """mismatch for key: resid_pdrop""" )
self.assertEqual(lowerCamelCase_ , c.scale_attn_weights , """mismatch for key: scale_attn_weights""" )
self.assertEqual(lowerCamelCase_ , c.summary_type , """mismatch for key: summary_type""" )
def a_ ( self : Dict ):
"""simple docstring"""
__lowerCamelCase : Union[str, Any] = PretrainedConfig()
__lowerCamelCase : Optional[int] = [key for key in base_config.__dict__ if key not in config_common_kwargs]
        # If this part of the test fails, you have arguments to add in `config_common_kwargs` above.
self.assertListEqual(
lowerCamelCase_ , ["""is_encoder_decoder""", """_name_or_path""", """_commit_hash""", """transformers_version"""] )
__lowerCamelCase : Union[str, Any] = [key for key, value in config_common_kwargs.items() if value == getattr(lowerCamelCase_ , lowerCamelCase_ )]
if len(lowerCamelCase_ ) > 0:
raise ValueError(
"""The following keys are set with the default values in"""
""" `test_configuration_common.config_common_kwargs` pick another value for them:"""
f" {', '.join(lowerCamelCase_ )}." )
def a_ ( self : Dict ):
"""simple docstring"""
with self.assertRaises(lowerCamelCase_ ):
# config is in subfolder, the following should not work without specifying the subfolder
__lowerCamelCase : Tuple = BertConfig.from_pretrained("""hf-internal-testing/tiny-random-bert-subfolder""" )
__lowerCamelCase : Tuple = BertConfig.from_pretrained("""hf-internal-testing/tiny-random-bert-subfolder""" , subfolder="""bert""" )
self.assertIsNotNone(lowerCamelCase_ )
def a_ ( self : Union[str, Any] ):
"""simple docstring"""
__lowerCamelCase : str = mock.Mock()
__lowerCamelCase : Tuple = 500
__lowerCamelCase : Optional[int] = {}
__lowerCamelCase : Any = HTTPError
__lowerCamelCase : Union[str, Any] = {}
# Download this model to make sure it's in the cache.
__lowerCamelCase : str = BertConfig.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch("""requests.Session.request""" , return_value=lowerCamelCase_ ) as mock_head:
__lowerCamelCase : List[str] = BertConfig.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
        # This checks that the fake head request was actually called
mock_head.assert_called()
def a_ ( self : List[str] ):
"""simple docstring"""
__lowerCamelCase : Any = BertConfig.from_pretrained(
"""https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json""" )
def a_ ( self : List[str] ):
"""simple docstring"""
__lowerCamelCase : Optional[Any] = AutoConfig.from_pretrained("""bert-base-cased""" )
__lowerCamelCase : str = ["""config.4.0.0.json"""]
with tempfile.TemporaryDirectory() as tmp_dir:
configuration.save_pretrained(lowerCamelCase_ )
__lowerCamelCase : Optional[Any] = 2
json.dump(configuration.to_dict() , open(os.path.join(lowerCamelCase_ , """config.4.0.0.json""" ) , """w""" ) )
# This should pick the new configuration file as the version of Transformers is > 4.0.0
__lowerCamelCase : Optional[int] = AutoConfig.from_pretrained(lowerCamelCase_ )
self.assertEqual(new_configuration.hidden_size , 2 )
# Will need to be adjusted if we reach v42 and this test is still here.
# Should pick the old configuration file as the version of Transformers is < 4.42.0
__lowerCamelCase : Tuple = ["""config.42.0.0.json"""]
__lowerCamelCase : Any = 768
configuration.save_pretrained(lowerCamelCase_ )
shutil.move(os.path.join(lowerCamelCase_ , """config.4.0.0.json""" ) , os.path.join(lowerCamelCase_ , """config.42.0.0.json""" ) )
__lowerCamelCase : Optional[int] = AutoConfig.from_pretrained(lowerCamelCase_ )
self.assertEqual(new_configuration.hidden_size , 768 )
def a_ ( self : Optional[Any] ):
"""simple docstring"""
__lowerCamelCase : List[str] = """hf-internal-testing/test-two-configs"""
import transformers as new_transformers
__lowerCamelCase : Optional[int] = """v4.0.0"""
__lowerCamelCase , __lowerCamelCase : int = new_transformers.models.auto.AutoConfig.from_pretrained(
lowerCamelCase_ , return_unused_kwargs=lowerCamelCase_ )
self.assertEqual(new_configuration.hidden_size , 2 )
        # This checks `_configuration_file` is not kept in the kwargs by mistake.
self.assertDictEqual(lowerCamelCase_ , {} )
        # Testing an older version by monkey-patching the version in the module where it's used.
import transformers as old_transformers
__lowerCamelCase : str = """v3.0.0"""
__lowerCamelCase : str = old_transformers.models.auto.AutoConfig.from_pretrained(lowerCamelCase_ )
self.assertEqual(old_configuration.hidden_size , 768 )
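# `update_from_string` (exercised above) parses "key=value" pairs and casts each
# value to the type of the existing attribute; a standalone sketch of that behavior:
def update_from_string_sketch(obj, update_str: str) -> None:
    for pair in update_str.split(","):
        key, value = pair.split("=")
        old = getattr(obj, key)  # raises if the key is unknown
        if isinstance(old, bool):
            setattr(obj, key, value.lower() in ("true", "1"))
        else:
            setattr(obj, key, type(old)(value))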
| 150 |
"""simple docstring"""
import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch
import pyarrow as pa
import pytest
import requests
from packaging import version
from datasets import config
if config.PY_VERSION < version.parse('3.8'):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
def __snake_case ( _lowercase ,_lowercase=False ):
"""simple docstring"""
try:
UpperCamelCase = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
UpperCamelCase = default
else:
# KEY is set, convert it to True or False.
try:
UpperCamelCase = strtobool(_lowercase )
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(f'If set, {key} must be yes or no.' )
return _value
SCREAMING_SNAKE_CASE_ = parse_flag_from_env('RUN_SLOW', default=False)
SCREAMING_SNAKE_CASE_ = parse_flag_from_env('RUN_REMOTE', default=False)
SCREAMING_SNAKE_CASE_ = parse_flag_from_env('RUN_LOCAL', default=True)
SCREAMING_SNAKE_CASE_ = parse_flag_from_env('RUN_PACKAGED', default=True)
# Compression
SCREAMING_SNAKE_CASE_ = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason='test requires lz4')
SCREAMING_SNAKE_CASE_ = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason='test requires py7zr')
SCREAMING_SNAKE_CASE_ = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason='test requires zstandard')
# Audio
SCREAMING_SNAKE_CASE_ = pytest.mark.skipif(
# On Windows and OS X, soundfile installs sndfile
find_spec('soundfile') is None or version.parse(importlib_metadata.version('soundfile')) < version.parse('0.12.0'),
reason='test requires sndfile>=0.12.1: \'pip install \"soundfile>=0.12.1\"\'; ',
)
# Beam
SCREAMING_SNAKE_CASE_ = pytest.mark.skipif(
not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse('0.3.2'),
reason='test requires apache-beam and a compatible dill version',
)
# Dill-cloudpickle compatibility
SCREAMING_SNAKE_CASE_ = pytest.mark.skipif(
config.DILL_VERSION <= version.parse('0.3.2'),
reason='test requires dill>0.3.2 for cloudpickle compatibility',
)
# Windows
SCREAMING_SNAKE_CASE_ = pytest.mark.skipif(
sys.platform == 'win32',
reason='test should not be run on Windows',
)
def __snake_case ( _lowercase ):
"""simple docstring"""
try:
import faiss # noqa
except ImportError:
UpperCamelCase = unittest.skip('''test requires faiss''' )(_lowercase )
return test_case
def __snake_case ( _lowercase ):
"""simple docstring"""
try:
import regex # noqa
except ImportError:
UpperCamelCase = unittest.skip('''test requires regex''' )(_lowercase )
return test_case
def __snake_case ( _lowercase ):
"""simple docstring"""
try:
import elasticsearch # noqa
except ImportError:
UpperCamelCase = unittest.skip('''test requires elasticsearch''' )(_lowercase )
return test_case
def __snake_case ( _lowercase ):
"""simple docstring"""
try:
import sqlalchemy # noqa
except ImportError:
UpperCamelCase = unittest.skip('''test requires sqlalchemy''' )(_lowercase )
return test_case
def __snake_case ( _lowercase ):
"""simple docstring"""
if not config.TORCH_AVAILABLE:
UpperCamelCase = unittest.skip('''test requires PyTorch''' )(_lowercase )
return test_case
def __snake_case ( _lowercase ):
"""simple docstring"""
if not config.TF_AVAILABLE:
UpperCamelCase = unittest.skip('''test requires TensorFlow''' )(_lowercase )
return test_case
def __snake_case ( _lowercase ):
"""simple docstring"""
if not config.JAX_AVAILABLE:
UpperCamelCase = unittest.skip('''test requires JAX''' )(_lowercase )
return test_case
def __snake_case ( _lowercase ):
"""simple docstring"""
if not config.PIL_AVAILABLE:
UpperCamelCase = unittest.skip('''test requires Pillow''' )(_lowercase )
return test_case
def __snake_case ( _lowercase ):
"""simple docstring"""
try:
import transformers # noqa F401
except ImportError:
return unittest.skip('''test requires transformers''' )(_lowercase )
else:
return test_case
def __snake_case ( _lowercase ):
"""simple docstring"""
try:
import tiktoken # noqa F401
except ImportError:
return unittest.skip('''test requires tiktoken''' )(_lowercase )
else:
return test_case
def __snake_case ( _lowercase ):
"""simple docstring"""
try:
import spacy # noqa F401
except ImportError:
return unittest.skip('''test requires spacy''' )(_lowercase )
else:
return test_case
def __snake_case ( _lowercase ):
"""simple docstring"""
def _require_spacy_model(_lowercase ):
try:
import spacy # noqa F401
spacy.load(_lowercase )
except ImportError:
return unittest.skip('''test requires spacy''' )(_lowercase )
except OSError:
return unittest.skip('''test requires spacy model \'{}\''''.format(_lowercase ) )(_lowercase )
else:
return test_case
return _require_spacy_model
def __snake_case ( _lowercase ):
"""simple docstring"""
try:
import pyspark # noqa F401
except ImportError:
return unittest.skip('''test requires pyspark''' )(_lowercase )
else:
return test_case
def __snake_case ( _lowercase ):
"""simple docstring"""
try:
import joblibspark # noqa F401
except ImportError:
return unittest.skip('''test requires joblibspark''' )(_lowercase )
else:
return test_case
def __snake_case ( _lowercase ):
"""simple docstring"""
if not _run_slow_tests or _run_slow_tests == 0:
UpperCamelCase = unittest.skip('''test is slow''' )(_lowercase )
return test_case
def __snake_case ( _lowercase ):
"""simple docstring"""
if not _run_local_tests or _run_local_tests == 0:
UpperCamelCase = unittest.skip('''test is local''' )(_lowercase )
return test_case
def __snake_case ( _lowercase ):
"""simple docstring"""
if not _run_packaged_tests or _run_packaged_tests == 0:
UpperCamelCase = unittest.skip('''test is packaged''' )(_lowercase )
return test_case
def __snake_case ( _lowercase ):
"""simple docstring"""
if not _run_remote_tests or _run_remote_tests == 0:
UpperCamelCase = unittest.skip('''test requires remote''' )(_lowercase )
return test_case
def __snake_case ( *_lowercase ):
"""simple docstring"""
def decorate(cls ):
for name, fn in cls.__dict__.items():
if callable(_lowercase ) and name.startswith('''test''' ):
for decorator in decorators:
UpperCamelCase = decorator(_lowercase )
setattr(cls ,_lowercase ,_lowercase )
return cls
return decorate
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
pass
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
A_ = 0
A_ = 1
A_ = 2
@contextmanager
def __snake_case ( _lowercase=OfflineSimulationMode.CONNECTION_FAILS ,_lowercase=1e-16 ):
"""simple docstring"""
UpperCamelCase = requests.Session().request
def timeout_request(_lowercase ,_lowercase ,_lowercase ,**_lowercase ):
# Change the url to an invalid url so that the connection hangs
UpperCamelCase = '''https://10.255.255.1'''
if kwargs.get('''timeout''' ) is None:
raise RequestWouldHangIndefinitelyError(
f'Tried a call to {url} in offline mode with no timeout set. Please set a timeout.' )
UpperCamelCase = timeout
try:
return online_request(_lowercase ,_lowercase ,**_lowercase )
except Exception as e:
# The following changes in the error are just here to make the offline timeout error prettier
UpperCamelCase = url
UpperCamelCase = e.args[0]
UpperCamelCase = (max_retry_error.args[0].replace('''10.255.255.1''' ,f'OfflineMock[{url}]' ),)
UpperCamelCase = (max_retry_error,)
raise
def raise_connection_error(_lowercase ,_lowercase ,**_lowercase ):
raise requests.ConnectionError('''Offline mode is enabled.''' ,request=_lowercase )
if mode is OfflineSimulationMode.CONNECTION_FAILS:
with patch('''requests.Session.send''' ,_lowercase ):
yield
elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
# inspired from https://stackoverflow.com/a/904609
with patch('''requests.Session.request''' ,_lowercase ):
yield
elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
with patch('''datasets.config.HF_DATASETS_OFFLINE''' ,_lowercase ):
yield
else:
raise ValueError('''Please use a value from the OfflineSimulationMode enum.''' )
@contextmanager
def __snake_case ( *_lowercase ,**_lowercase ):
"""simple docstring"""
UpperCamelCase = str(Path().resolve() )
with tempfile.TemporaryDirectory(*_lowercase ,**_lowercase ) as tmp_dir:
try:
os.chdir(_lowercase )
yield
finally:
os.chdir(_lowercase )
@contextmanager
def __snake_case ( ):
"""simple docstring"""
import gc
gc.collect()
UpperCamelCase = pa.total_allocated_bytes()
yield
assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."
@contextmanager
def __snake_case ( ):
"""simple docstring"""
import gc
gc.collect()
UpperCamelCase = pa.total_allocated_bytes()
yield
assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."
def __snake_case ( _lowercase ,_lowercase ):
"""simple docstring"""
return deepcopy(_lowercase ).integers(0 ,100 ,10 ).tolist() == deepcopy(_lowercase ).integers(0 ,100 ,10 ).tolist()
def __snake_case ( _lowercase ):
"""simple docstring"""
import decorator
from requests.exceptions import HTTPError
def _wrapper(_lowercase ,*_lowercase ,**_lowercase ):
try:
return func(*_lowercase ,**_lowercase )
except HTTPError as err:
if str(_lowercase ).startswith('''500''' ) or str(_lowercase ).startswith('''502''' ):
pytest.xfail(str(_lowercase ) )
raise err
return decorator.decorator(_wrapper ,_lowercase )
class snake_case_ :
"""simple docstring"""
def __init__( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> Dict:
UpperCamelCase = returncode
UpperCamelCase = stdout
UpperCamelCase = stderr
async def __snake_case ( _lowercase ,_lowercase ):
"""simple docstring"""
while True:
UpperCamelCase = await stream.readline()
if line:
callback(_lowercase )
else:
break
async def __snake_case ( _lowercase ,_lowercase=None ,_lowercase=None ,_lowercase=None ,_lowercase=False ,_lowercase=False ):
"""simple docstring"""
if echo:
print('''\nRunning: ''' ,''' '''.join(_lowercase ) )
UpperCamelCase = await asyncio.create_subprocess_exec(
cmd[0] ,*cmd[1:] ,stdin=_lowercase ,stdout=asyncio.subprocess.PIPE ,stderr=asyncio.subprocess.PIPE ,env=_lowercase ,)
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
UpperCamelCase = []
UpperCamelCase = []
def tee(_lowercase ,_lowercase ,_lowercase ,_lowercase="" ):
UpperCamelCase = line.decode('''utf-8''' ).rstrip()
sink.append(_lowercase )
if not quiet:
print(_lowercase ,_lowercase ,file=_lowercase )
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
_read_stream(p.stdout ,lambda _lowercase : tee(_lowercase ,_lowercase ,sys.stdout ,label='''stdout:''' ) ),
_read_stream(p.stderr ,lambda _lowercase : tee(_lowercase ,_lowercase ,sys.stderr ,label='''stderr:''' ) ),
] ,timeout=_lowercase ,)
return _RunOutput(await p.wait() ,_lowercase ,_lowercase )
def __snake_case ( _lowercase ,_lowercase=None ,_lowercase=None ,_lowercase=180 ,_lowercase=False ,_lowercase=True ):
"""simple docstring"""
UpperCamelCase = asyncio.get_event_loop()
UpperCamelCase = loop.run_until_complete(
_stream_subprocess(_lowercase ,env=_lowercase ,stdin=_lowercase ,timeout=_lowercase ,quiet=_lowercase ,echo=_lowercase ) )
UpperCamelCase = ''' '''.join(_lowercase )
if result.returncode > 0:
UpperCamelCase = '''\n'''.join(result.stderr )
raise RuntimeError(
f'\'{cmd_str}\' failed with returncode {result.returncode}\n\n'
f'The combined stderr from workers follows:\n{stderr}' )
# check that the subprocess actually did run and produced some output, should the test rely on
# the remote side to do the testing
if not result.stdout and not result.stderr:
raise RuntimeError(f'\'{cmd_str}\' produced no output.' )
return result
def __snake_case ( ):
"""simple docstring"""
UpperCamelCase = os.environ.get('''PYTEST_XDIST_WORKER''' ,'''gw0''' )
UpperCamelCase = re.sub(r'''^gw''' ,'''''' ,_lowercase ,0 ,re.M )
return int(_lowercase )
def __snake_case ( ):
"""simple docstring"""
UpperCamelCase = 2_9500
UpperCamelCase = pytest_xdist_worker_id()
return port + uniq_delta | 34 | 0 |
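# The last two helpers above derive a unique torch.distributed port per
# pytest-xdist worker, so concurrent launches don't collide; e.g. worker "gw3"
# gets 29500 + 3. A standalone check of that logic:
import re

def port_for_worker(worker_id: str, base: int = 29500) -> int:
    return base + int(re.sub(r"^gw", "", worker_id))

assert port_for_worker("gw3") == 29503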
'''simple docstring'''
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A = logging.get_logger(__name__)
A = {
'microsoft/unispeech-sat-base-100h-libri-ft': (
'https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft/resolve/main/config.json'
),
# See all UniSpeechSat models at https://huggingface.co/models?filter=unispeech_sat
}
class __snake_case ( lowerCamelCase_):
_lowerCAmelCase = '''unispeech-sat'''
def __init__( self, A=32, A=768, A=12, A=12, A=3072, A="gelu", A=0.1, A=0.1, A=0.1, A=0.0, A=0.0, A=0.1, A=0.1, A=0.02, A=1e-5, A="group", A="gelu", A=(512, 512, 512, 512, 512, 512, 512), A=(5, 2, 2, 2, 2, 2, 2), A=(10, 3, 3, 3, 3, 2, 2), A=False, A=128, A=16, A=False, A=True, A=0.05, A=10, A=2, A=0.0, A=10, A=0, A=320, A=2, A=0.1, A=100, A=256, A=256, A=0.1, A="mean", A=False, A=False, A=256, A=(512, 512, 512, 512, 1500), A=(5, 3, 3, 1, 1), A=(1, 2, 3, 1, 1), A=512, A=0, A=1, A=2, A=504, **A, ):
"""simple docstring"""
super().__init__(**lowerCamelCase_, pad_token_id=lowerCamelCase_, bos_token_id=lowerCamelCase_, eos_token_id=lowerCamelCase_ )
lowerCamelCase : List[str] = hidden_size
lowerCamelCase : Union[str, Any] = feat_extract_norm
lowerCamelCase : List[Any] = feat_extract_activation
lowerCamelCase : Optional[int] = list(lowerCamelCase_ )
lowerCamelCase : List[str] = list(lowerCamelCase_ )
lowerCamelCase : str = list(lowerCamelCase_ )
lowerCamelCase : Tuple = conv_bias
lowerCamelCase : str = num_conv_pos_embeddings
lowerCamelCase : Union[str, Any] = num_conv_pos_embedding_groups
lowerCamelCase : Tuple = len(self.conv_dim )
lowerCamelCase : Union[str, Any] = num_hidden_layers
lowerCamelCase : List[Any] = intermediate_size
lowerCamelCase : Optional[int] = hidden_act
lowerCamelCase : List[Any] = num_attention_heads
lowerCamelCase : Union[str, Any] = hidden_dropout
lowerCamelCase : Optional[Any] = attention_dropout
lowerCamelCase : List[Any] = activation_dropout
lowerCamelCase : Union[str, Any] = feat_proj_dropout
lowerCamelCase : Optional[int] = final_dropout
lowerCamelCase : Any = layerdrop
lowerCamelCase : str = layer_norm_eps
lowerCamelCase : List[Any] = initializer_range
lowerCamelCase : Tuple = vocab_size
lowerCamelCase : Union[str, Any] = num_clusters
lowerCamelCase : List[str] = do_stable_layer_norm
lowerCamelCase : int = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='
' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='
F''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
F''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
lowerCamelCase : Tuple = apply_spec_augment
lowerCamelCase : Optional[int] = mask_time_prob
lowerCamelCase : int = mask_time_length
lowerCamelCase : List[str] = mask_time_min_masks
lowerCamelCase : int = mask_feature_prob
lowerCamelCase : Dict = mask_feature_length
lowerCamelCase : str = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
lowerCamelCase : int = num_codevectors_per_group
lowerCamelCase : int = num_codevector_groups
lowerCamelCase : Optional[Any] = contrastive_logits_temperature
lowerCamelCase : Optional[Any] = feat_quantizer_dropout
lowerCamelCase : str = num_negatives
lowerCamelCase : Tuple = codevector_dim
lowerCamelCase : Optional[int] = proj_codevector_dim
lowerCamelCase : Union[str, Any] = diversity_loss_weight
# ctc loss
lowerCamelCase : Any = ctc_loss_reduction
lowerCamelCase : List[Any] = ctc_zero_infinity
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
lowerCamelCase : List[Any] = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
lowerCamelCase : Union[str, Any] = list(lowerCamelCase_ )
lowerCamelCase : Union[str, Any] = list(lowerCamelCase_ )
lowerCamelCase : List[Any] = list(lowerCamelCase_ )
lowerCamelCase : Tuple = xvector_output_dim
@property
def UpperCAmelCase_ ( self ):
"""simple docstring"""
return functools.reduce(operator.mul, self.conv_stride, 1 )
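# The property above multiplies the convolutional strides together; with the
# default strides (5, 2, 2, 2, 2, 2, 2) it gives the factor by which raw audio
# is downsampled before the transformer:
import functools
import operator

assert functools.reduce(operator.mul, (5, 2, 2, 2, 2, 2, 2), 1) == 320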
| 320 |
"""simple docstring"""
import operator
def __snake_case ( _lowercase ,_lowercase = False ,_lowercase = None ):
"""simple docstring"""
UpperCamelCase = operator.lt if reverse else operator.gt
UpperCamelCase = solution or []
if not arr:
return solution
UpperCamelCase = [arr.pop(0 )]
for i, item in enumerate(_lowercase ):
if _operator(_lowercase ,sublist[-1] ):
sublist.append(_lowercase )
arr.pop(_lowercase )
# merging sublist into solution list
if not solution:
solution.extend(_lowercase )
else:
while sublist:
UpperCamelCase = sublist.pop(0 )
for i, xx in enumerate(_lowercase ):
if not _operator(_lowercase ,_lowercase ):
solution.insert(_lowercase ,_lowercase )
break
else:
solution.append(_lowercase )
strand_sort(_lowercase ,_lowercase ,_lowercase )
return solution
if __name__ == "__main__":
assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1] | 34 | 0 |
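# Worked trace for strand_sort([4, 3, 5, 1, 2]):
# strand 1 pulls the increasing run [4, 5] (1 is skipped because the list is
# mutated mid-scan), strand 2 pulls [3], strand 3 pulls [1, 2]; merging the
# strands in order yields [1, 2, 3, 4, 5].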
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
UpperCAmelCase_ : Tuple = logging.get_logger(__name__)
class UpperCAmelCase__ ( lowerCamelCase_ ):
def __init__( self : Optional[Any],*__A : Tuple,**__A : Tuple ):
warnings.warn(
"The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
" Please use MobileViTImageProcessor instead.",lowerCamelCase_,)
super().__init__(*lowerCamelCase_,**lowerCamelCase_ ) | 44 |
"""simple docstring"""
from scipy.stats import pearsonr
import datasets
SCREAMING_SNAKE_CASE_ = '\nPearson correlation coefficient and p-value for testing non-correlation.\nThe Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.\nThe p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.\n'
SCREAMING_SNAKE_CASE_ = '\nArgs:\n predictions (`list` of `int`): Predicted class labels, as returned by a model.\n references (`list` of `int`): Ground truth labels.\n return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.\n\nReturns:\n pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.\n p-value (`float`): P-value, which roughly indicates the probability of an The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.\n\nExamples:\n\n Example 1-A simple example using only predictions and references.\n >>> pearsonr_metric = datasets.load_metric("pearsonr")\n >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])\n >>> print(round(results[\'pearsonr\'], 2))\n -0.74\n\n Example 2-The same as Example 1, but that also returns the `p-value`.\n >>> pearsonr_metric = datasets.load_metric("pearsonr")\n >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)\n >>> print(sorted(list(results.keys())))\n [\'p-value\', \'pearsonr\']\n >>> print(round(results[\'pearsonr\'], 2))\n -0.74\n >>> print(round(results[\'p-value\'], 2))\n 0.15\n'
SCREAMING_SNAKE_CASE_ = '\n@article{2020SciPy-NMeth,\nauthor = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n Kern, Robert and Larson, Eric and Carey, C J and\n Polat, Ilhan and Feng, Yu and Moore, Eric W. and\n {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n Harris, Charles R. and Archibald, Anne M. and\n Ribeiro, Antonio H. and Pedregosa, Fabian and\n {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\ntitle = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n Computing in Python}},\njournal = {Nature Methods},\nyear = {2020},\nvolume = {17},\npages = {261--272},\nadsurl = {https://rdcu.be/b08Wh},\ndoi = {10.1038/s41592-019-0686-2},\n}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class snake_case_ ( datasets.Metric ):
"""simple docstring"""
def UpperCAmelCase__ ( self) -> Tuple:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''float'''),
'''references''': datasets.Value('''float'''),
}) , reference_urls=['''https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html'''] , )
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_=False) -> Any:
if return_pvalue:
UpperCamelCase = pearsonr(lowerCamelCase_ , lowerCamelCase_)
return {"pearsonr": results[0], "p-value": results[1]}
else:
return {"pearsonr": float(pearsonr(lowerCamelCase_ , lowerCamelCase_)[0])} | 34 | 0 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DetrConfig, DetrForObjectDetection, DetrForSegmentation, DetrImageProcessor, ResNetConfig
from transformers.utils import logging
logging.set_verbosity_info()
a_ : List[str] = logging.get_logger(__name__)
def __lowercase( UpperCAmelCase__ ):
"""simple docstring"""
if "resnet-50" in model_name:
lowerCamelCase = ResNetConfig.from_pretrained("microsoft/resnet-50" )
elif "resnet-101" in model_name:
lowerCamelCase = ResNetConfig.from_pretrained("microsoft/resnet-101" )
else:
raise ValueError("Model name should include either resnet50 or resnet101" )
lowerCamelCase = DetrConfig(use_timm_backbone=_lowercase , backbone_config=_lowercase )
# set label attributes
lowerCamelCase = "panoptic" in model_name
if is_panoptic:
lowerCamelCase = 250
else:
lowerCamelCase = 91
lowerCamelCase = "huggingface/label-files"
lowerCamelCase = "coco-detection-id2label.json"
lowerCamelCase = json.load(open(hf_hub_download(_lowercase , _lowercase , repo_type="dataset" ) , "r" ) )
lowerCamelCase = {int(_lowercase ): v for k, v in idalabel.items()}
lowerCamelCase = idalabel
lowerCamelCase = {v: k for k, v in idalabel.items()}
return config, is_panoptic
def __lowercase( UpperCAmelCase__ ):
"""simple docstring"""
lowerCamelCase = []
# stem
# fmt: off
rename_keys.append(("backbone.0.body.conv1.weight", "backbone.conv_encoder.model.embedder.embedder.convolution.weight") )
rename_keys.append(("backbone.0.body.bn1.weight", "backbone.conv_encoder.model.embedder.embedder.normalization.weight") )
rename_keys.append(("backbone.0.body.bn1.bias", "backbone.conv_encoder.model.embedder.embedder.normalization.bias") )
rename_keys.append(("backbone.0.body.bn1.running_mean", "backbone.conv_encoder.model.embedder.embedder.normalization.running_mean") )
rename_keys.append(("backbone.0.body.bn1.running_var", "backbone.conv_encoder.model.embedder.embedder.normalization.running_var") )
# stages
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
# shortcut
if layer_idx == 0:
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.0.weight""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.convolution.weight""",
) )
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.weight""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.weight""",
) )
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.bias""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.bias""",
) )
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_mean""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_mean""",
) )
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_var""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_var""",
) )
# 3 convs
for i in range(3 ):
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.conv{i+1}.weight""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.convolution.weight""",
) )
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.weight""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.weight""",
) )
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.bias""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.bias""",
) )
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_mean""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_mean""",
) )
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_var""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_var""",
) )
# fmt: on
for i in range(config.encoder_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(
F"""transformer.encoder.layers.{i}.self_attn.out_proj.weight""",
F"""encoder.layers.{i}.self_attn.out_proj.weight""",
) )
rename_keys.append(
(F"""transformer.encoder.layers.{i}.self_attn.out_proj.bias""", F"""encoder.layers.{i}.self_attn.out_proj.bias""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.weight""", F"""encoder.layers.{i}.fc1.weight""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.bias""", F"""encoder.layers.{i}.fc1.bias""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.weight""", F"""encoder.layers.{i}.fc2.weight""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.bias""", F"""encoder.layers.{i}.fc2.bias""") )
rename_keys.append(
(F"""transformer.encoder.layers.{i}.norm1.weight""", F"""encoder.layers.{i}.self_attn_layer_norm.weight""") )
rename_keys.append(
(F"""transformer.encoder.layers.{i}.norm1.bias""", F"""encoder.layers.{i}.self_attn_layer_norm.bias""") )
rename_keys.append(
(F"""transformer.encoder.layers.{i}.norm2.weight""", F"""encoder.layers.{i}.final_layer_norm.weight""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.bias""", F"""encoder.layers.{i}.final_layer_norm.bias""") )
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(
F"""transformer.decoder.layers.{i}.self_attn.out_proj.weight""",
F"""decoder.layers.{i}.self_attn.out_proj.weight""",
) )
rename_keys.append(
(F"""transformer.decoder.layers.{i}.self_attn.out_proj.bias""", F"""decoder.layers.{i}.self_attn.out_proj.bias""") )
rename_keys.append(
(
F"""transformer.decoder.layers.{i}.multihead_attn.out_proj.weight""",
F"""decoder.layers.{i}.encoder_attn.out_proj.weight""",
) )
rename_keys.append(
(
F"""transformer.decoder.layers.{i}.multihead_attn.out_proj.bias""",
F"""decoder.layers.{i}.encoder_attn.out_proj.bias""",
) )
rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.weight""", F"""decoder.layers.{i}.fc1.weight""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.bias""", F"""decoder.layers.{i}.fc1.bias""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.weight""", F"""decoder.layers.{i}.fc2.weight""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.bias""", F"""decoder.layers.{i}.fc2.bias""") )
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm1.weight""", F"""decoder.layers.{i}.self_attn_layer_norm.weight""") )
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm1.bias""", F"""decoder.layers.{i}.self_attn_layer_norm.bias""") )
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm2.weight""", F"""decoder.layers.{i}.encoder_attn_layer_norm.weight""") )
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm2.bias""", F"""decoder.layers.{i}.encoder_attn_layer_norm.bias""") )
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm3.weight""", F"""decoder.layers.{i}.final_layer_norm.weight""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.bias""", F"""decoder.layers.{i}.final_layer_norm.bias""") )
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
("input_proj.weight", "input_projection.weight"),
("input_proj.bias", "input_projection.bias"),
("query_embed.weight", "query_position_embeddings.weight"),
("transformer.decoder.norm.weight", "decoder.layernorm.weight"),
("transformer.decoder.norm.bias", "decoder.layernorm.bias"),
("class_embed.weight", "class_labels_classifier.weight"),
("class_embed.bias", "class_labels_classifier.bias"),
("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"),
("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"),
("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"),
("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"),
("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"),
("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"),
] )
return rename_keys
def __lowercase( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ):
"""simple docstring"""
lowerCamelCase = state_dict.pop(_lowercase )
lowerCamelCase = val
def __lowercase( UpperCAmelCase__ , UpperCAmelCase__=False ):
"""simple docstring"""
lowerCamelCase = ""
if is_panoptic:
lowerCamelCase = "detr."
# first: transformer encoder
for i in range(6 ):
# read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
lowerCamelCase = state_dict.pop(F"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight""" )
lowerCamelCase = state_dict.pop(F"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias""" )
# next, add query, keys and values (in that order) to the state dict
lowerCamelCase = in_proj_weight[:256, :]
lowerCamelCase = in_proj_bias[:256]
lowerCamelCase = in_proj_weight[256:512, :]
lowerCamelCase = in_proj_bias[256:512]
lowerCamelCase = in_proj_weight[-256:, :]
lowerCamelCase = in_proj_bias[-256:]
# next: transformer decoder (which is a bit more complex because it also includes cross-attention)
for i in range(6 ):
# read in weights + bias of input projection layer of self-attention
lowerCamelCase = state_dict.pop(F"""{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight""" )
lowerCamelCase = state_dict.pop(F"""{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias""" )
# next, add query, keys and values (in that order) to the state dict
lowerCamelCase = in_proj_weight[:256, :]
lowerCamelCase = in_proj_bias[:256]
lowerCamelCase = in_proj_weight[256:512, :]
lowerCamelCase = in_proj_bias[256:512]
lowerCamelCase = in_proj_weight[-256:, :]
lowerCamelCase = in_proj_bias[-256:]
# read in weights + bias of input projection layer of cross-attention
lowerCamelCase = state_dict.pop(
F"""{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight""" )
lowerCamelCase = state_dict.pop(F"""{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias""" )
# next, add query, keys and values (in that order) of cross-attention to the state dict
lowerCamelCase = in_proj_weight_cross_attn[:256, :]
lowerCamelCase = in_proj_bias_cross_attn[:256]
lowerCamelCase = in_proj_weight_cross_attn[256:512, :]
lowerCamelCase = in_proj_bias_cross_attn[256:512]
lowerCamelCase = in_proj_weight_cross_attn[-256:, :]
lowerCamelCase = in_proj_bias_cross_attn[-256:]
def __lowercase( ):
"""simple docstring"""
lowerCamelCase = "http://images.cocodataset.org/val2017/000000039769.jpg"
lowerCamelCase = Image.open(requests.get(_lowercase , stream=_lowercase ).raw )
return im
@torch.no_grad()
def __lowercase( UpperCAmelCase__ , UpperCAmelCase__=None , UpperCAmelCase__=False ):
"""simple docstring"""
lowerCamelCase , lowerCamelCase = get_detr_config(_lowercase )
# load original model from torch hub
lowerCamelCase = {
"detr-resnet-50": "detr_resnet50",
"detr-resnet-101": "detr_resnet101",
}
logger.info(F"""Converting model {model_name}...""" )
lowerCamelCase = torch.hub.load("facebookresearch/detr" , model_name_to_original_name[model_name] , pretrained=_lowercase ).eval()
lowerCamelCase = detr.state_dict()
# rename keys
for src, dest in create_rename_keys(_lowercase ):
if is_panoptic:
lowerCamelCase = "detr." + src
rename_key(_lowercase , _lowercase , _lowercase )
# query, key and value matrices need special treatment
read_in_q_k_v(_lowercase , is_panoptic=_lowercase )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
lowerCamelCase = "detr.model." if is_panoptic else "model."
for key in state_dict.copy().keys():
if is_panoptic:
if (
key.startswith("detr" )
and not key.startswith("class_labels_classifier" )
and not key.startswith("bbox_predictor" )
):
lowerCamelCase = state_dict.pop(_lowercase )
lowerCamelCase = val
elif "class_labels_classifier" in key or "bbox_predictor" in key:
lowerCamelCase = state_dict.pop(_lowercase )
lowerCamelCase = val
elif key.startswith("bbox_attention" ) or key.startswith("mask_head" ):
continue
else:
lowerCamelCase = state_dict.pop(_lowercase )
lowerCamelCase = val
else:
if not key.startswith("class_labels_classifier" ) and not key.startswith("bbox_predictor" ):
lowerCamelCase = state_dict.pop(_lowercase )
lowerCamelCase = val
# finally, create HuggingFace model and load state dict
lowerCamelCase = DetrForSegmentation(_lowercase ) if is_panoptic else DetrForObjectDetection(_lowercase )
model.load_state_dict(_lowercase )
model.eval()
# verify our conversion on an image
lowerCamelCase = "coco_panoptic" if is_panoptic else "coco_detection"
lowerCamelCase = DetrImageProcessor(format=_lowercase )
lowerCamelCase = processor(images=prepare_img() , return_tensors="pt" )
lowerCamelCase = encoding["pixel_values"]
lowerCamelCase = detr(_lowercase )
lowerCamelCase = model(_lowercase )
assert torch.allclose(outputs.logits , original_outputs["pred_logits"] , atol=1E-3 )
assert torch.allclose(outputs.pred_boxes , original_outputs["pred_boxes"] , atol=1E-3 )
if is_panoptic:
assert torch.allclose(outputs.pred_masks , original_outputs["pred_masks"] , atol=1E-4 )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
# Save model and image processor
logger.info(F"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""" )
Path(_lowercase ).mkdir(exist_ok=_lowercase )
model.save_pretrained(_lowercase )
processor.save_pretrained(_lowercase )
if push_to_hub:
# Upload model and image processor to the hub
logger.info("Uploading PyTorch model and image processor to the hub..." )
model.push_to_hub(F"""nielsr/{model_name}""" )
processor.push_to_hub(F"""nielsr/{model_name}""" )
if __name__ == "__main__":
a_ : Optional[Any] = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
default='detr-resnet-50',
type=str,
choices=['detr-resnet-50', 'detr-resnet-101'],
help='Name of the DETR model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
parser.add_argument('--push_to_hub', action='store_true', help='Whether to push the model to the hub or not.')
a_ : Optional[int] = parser.parse_args()
convert_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub) | 623 |
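# Example invocation of the conversion script above (the script filename and the output
# path are placeholders), followed by reloading the converted checkpoint with the
# standard transformers API:
#   python convert_detr_checkpoint.py --model_name detr-resnet-50 \
#       --pytorch_dump_folder_path ./detr-resnet-50
from transformers import DetrForObjectDetection, DetrImageProcessor

model = DetrForObjectDetection.from_pretrained("./detr-resnet-50")
processor = DetrImageProcessor.from_pretrained("./detr-resnet-50")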
"""simple docstring"""
import unittest
from dataclasses import dataclass
import pytest
from accelerate.commands.config.config_args import SageMakerConfig
from accelerate.utils import ComputeEnvironment
from accelerate.utils.launch import _convert_nargs_to_dict
@dataclass
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
A_ = ComputeEnvironment.AMAZON_SAGEMAKER
A_ = True
A_ = '''ml.p3.2xlarge'''
A_ = '''accelerate_sagemaker_execution_role'''
A_ = '''hf-sm'''
A_ = '''us-east-1'''
A_ = 1
A_ = '''accelerate-sagemaker-1'''
A_ = '''1.6'''
A_ = '''4.4'''
A_ = '''train.py'''
A_ = [
'''--model_name_or_path''',
'''bert''',
'''--do_train''',
'''False''',
'''--epochs''',
'''3''',
'''--learning_rate''',
'''5e-5''',
'''--max_steps''',
'''50.5''',
]
A_ = [
'''--model_name_or_path''',
'''bert''',
'''--do_train''',
'''--do_test''',
'''False''',
'''--do_predict''',
'''--epochs''',
'''3''',
'''--learning_rate''',
'''5e-5''',
'''--max_steps''',
'''50.5''',
]
class snake_case_ ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase__ ( self) -> List[Any]:
        # `_convert_nargs_to_dict` should parse each script argument into its native Python type.
UpperCamelCase = _convert_nargs_to_dict(MockLaunchConfig.success_training_script_args)
assert isinstance(converted_args['''model_name_or_path'''] , lowerCamelCase_)
assert isinstance(converted_args['''do_train'''] , lowerCamelCase_)
assert isinstance(converted_args['''epochs'''] , lowerCamelCase_)
assert isinstance(converted_args['''learning_rate'''] , lowerCamelCase_)
assert isinstance(converted_args['''max_steps'''] , lowerCamelCase_)
with pytest.raises(lowerCamelCase_):
_convert_nargs_to_dict(MockLaunchConfig.fail_training_script_args) | 34 | 0 |
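# Illustration of the behavior the test above exercises. This is a hypothetical
# re-implementation (NOT accelerate's actual code) of the happy path: flag-style
# arguments are paired with their values and cast to native Python types.
def convert_nargs_to_dict_sketch(args):
    result, i = {}, 0
    while i < len(args):
        key = args[i].lstrip("-")
        value = "True"  # a bare flag is treated as a boolean switch
        if i + 1 < len(args) and not args[i + 1].startswith("--"):
            value = args[i + 1]
            i += 1
        for cast in (int, float):
            try:
                value = cast(value)
                break
            except ValueError:
                pass
        if value in ("True", "False"):
            value = value == "True"
        result[key] = value
        i += 1
    return result

# e.g. ["--model_name_or_path", "bert", "--do_train", "False", "--epochs", "3",
#       "--learning_rate", "5e-5", "--max_steps", "50.5"]
# -> {"model_name_or_path": "bert", "do_train": False, "epochs": 3,
#     "learning_rate": 5e-05, "max_steps": 50.5}
# The real implementation additionally validates mixed bare-flag/value usage and
# raises on the `fail_training_script_args` list above; this sketch omits that check.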
'''simple docstring'''
import numpy as np
import torch
from torch.utils.data import Dataset, IterableDataset
from ..utils.generic import ModelOutput
class _lowerCAmelCase ( lowerCamelCase_ ):
def __init__(self , lowercase , lowercase , lowercase ):
A_ : Optional[Any] = dataset
A_ : Dict = process
A_ : Optional[int] = params
def __len__(self ):
return len(self.dataset )
def __getitem__(self , lowercase ):
A_ : List[str] = self.dataset[i]
A_ : Optional[int] = self.process(lowerCamelCase_ , **self.params )
return processed
class _lowerCAmelCase ( lowerCamelCase_ ):
def __init__(self , lowercase , lowercase , lowercase , lowercase=None ):
A_ : Any = loader
A_ : Optional[Any] = infer
A_ : int = params
if loader_batch_size == 1:
# Let's spare some time by deactivating altogether
A_ : Optional[Any] = None
A_ : Any = loader_batch_size
# Internal bookkeeping
A_ : str = None
A_ : int = None
def __len__(self ):
return len(self.loader )
def __iter__(self ):
A_ : Any = iter(self.loader )
return self
def _a (self ):
if isinstance(self._loader_batch_data , torch.Tensor ):
            # Batch data is a simple tensor, just fetch the slice
A_ : List[Any] = self._loader_batch_data[self._loader_batch_index]
else:
# Batch data is assumed to be BaseModelOutput (or dict)
A_ : Optional[Any] = {}
for k, element in self._loader_batch_data.items():
if isinstance(lowerCamelCase_ , lowerCamelCase_ ):
# Convert ModelOutput to tuple first
A_ : Optional[Any] = element.to_tuple()
if isinstance(element[0] , torch.Tensor ):
A_ : Union[str, Any] = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element )
elif isinstance(element[0] , np.ndarray ):
A_ : List[Any] = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element )
continue
if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(lowerCamelCase_ , lowerCamelCase_ ):
# Those are stored as lists of tensors so need specific unbatching.
if isinstance(element[0] , torch.Tensor ):
A_ : str = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element )
elif isinstance(element[0] , np.ndarray ):
A_ : Any = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element )
continue
if element is None:
# This can happen for optional data that get passed around
A_ : int = None
elif isinstance(element[self._loader_batch_index] , torch.Tensor ):
                    # Take correct batch data, but make it look like batch_size=1
# For compatibility with other methods within transformers
A_ : Optional[int] = element[self._loader_batch_index].unsqueeze(0 )
elif isinstance(element[self._loader_batch_index] , np.ndarray ):
                    # Take correct batch data, but make it look like batch_size=1
# For compatibility with other methods within transformers
A_ : Optional[Any] = np.expand_dims(element[self._loader_batch_index] , 0 )
else:
# This is typically a list, so no need to `unsqueeze`.
A_ : Dict = element[self._loader_batch_index]
# Recreate the element by reusing the original class to make it look
# batch_size=1
A_ : int = self._loader_batch_data.__class__(lowerCamelCase_ )
self._loader_batch_index += 1
return result
def _a (self ):
if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
# We are currently unrolling a batch so we just need to return
# the current item within a batch
return self.loader_batch_item()
# We're out of items within a batch
A_ : Optional[Any] = next(self.iterator )
A_ : Dict = self.infer(lowerCamelCase_ , **self.params )
# We now have a batch of "inferred things".
if self.loader_batch_size is not None:
# Try to infer the size of the batch
if isinstance(lowerCamelCase_ , torch.Tensor ):
A_ : Optional[Any] = processed
else:
A_ : Union[str, Any] = list(processed.keys() )[0]
A_ : int = processed[key]
if isinstance(lowerCamelCase_ , lowerCamelCase_ ):
A_ : Dict = len(lowerCamelCase_ )
else:
A_ : Tuple = first_tensor.shape[0]
if 0 < observed_batch_size < self.loader_batch_size:
# could be last batch so we can't unroll as many
# elements.
A_ : str = observed_batch_size
# Setting internal index to unwrap the batch
A_ : Optional[int] = processed
A_ : Optional[int] = 0
return self.loader_batch_item()
else:
# We're not unrolling batches
return processed
class _lowerCAmelCase ( lowerCamelCase_ ):
def __init__(self , lowercase , lowercase , lowercase , lowercase=None ):
super().__init__(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
def __iter__(self ):
A_ : str = iter(self.loader )
A_ : Optional[Any] = None
return self
def _a (self ):
if self.subiterator is None:
A_ : Any = self.infer(next(self.iterator ) , **self.params )
try:
# Try to return next item
A_ : List[Any] = next(self.subiterator )
except StopIteration:
            # When a preprocess iterator ends, we can start looking at the next item.
            # ChunkIterator will keep feeding until ALL elements of the iterator
            # have created their subiterator and have been iterated over.
#
# Another way to look at it, is we're basically flattening lists of lists
# into a single list, but with generators
A_ : Union[str, Any] = self.infer(next(self.iterator ) , **self.params )
A_ : int = next(self.subiterator )
return processed
class _lowerCAmelCase ( lowerCamelCase_ ):
def __iter__(self ):
A_ : Dict = iter(self.loader )
return self
def _a (self ):
# Extremely similar to PipelineIterator in its unpacking mechanism
# BUT, we have an extra required item which is the presence of `is_last`
# That is because everything is flattened by `PipelineChunkIterator` we
# need to keep track of how to regroup here in the original `process`
# boundaries so that `process` and `postprocess` see the same data.
# This iterator accumulates items (possibly while unbatching) until it
# its a `is_last` and then just passes it on to the caller.
A_ : Tuple = False
A_ : List[str] = []
if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
while self._loader_batch_index < self.loader_batch_size:
A_ : Optional[Any] = self.loader_batch_item()
A_ : List[Any] = item.pop("""is_last""" )
accumulator.append(lowerCamelCase_ )
if is_last:
return accumulator
while not is_last:
A_ : Dict = self.infer(next(self.iterator ) , **self.params )
if self.loader_batch_size is not None:
if isinstance(lowerCamelCase_ , torch.Tensor ):
A_ : Optional[int] = processed
else:
A_ : Any = list(processed.keys() )[0]
A_ : str = processed[key]
if isinstance(lowerCamelCase_ , lowerCamelCase_ ):
A_ : Optional[Any] = len(lowerCamelCase_ )
else:
A_ : Any = first_tensor.shape[0]
if 0 < observed_batch_size < self.loader_batch_size:
# could be last batch so we can't unroll as many
# elements.
A_ : int = observed_batch_size
A_ : Optional[int] = processed
A_ : Any = 0
while self._loader_batch_index < self.loader_batch_size:
A_ : int = self.loader_batch_item()
A_ : Optional[Any] = item.pop("""is_last""" )
accumulator.append(lowerCamelCase_ )
if is_last:
return accumulator
else:
A_ : Any = processed
A_ : str = item.pop("""is_last""" )
accumulator.append(lowerCamelCase_ )
return accumulator
class _lowerCAmelCase ( lowerCamelCase_ ):
def __init__(self , lowercase , lowercase ):
A_ : List[str] = dataset
A_ : Dict = key
def __len__(self ):
return len(self.dataset )
def __getitem__(self , lowercase ):
return self.dataset[i][self.key]
class _lowerCAmelCase ( lowerCamelCase_ ):
def __init__(self , lowercase , lowercase , lowercase ):
A_ : int = dataset
A_ : Dict = keya
A_ : Dict = keya
def __len__(self ):
return len(self.dataset )
def __getitem__(self , lowercase ):
return {"text": self.dataset[i][self.keya], "text_pair": self.dataset[i][self.keya]} | 667 |
"""simple docstring"""
from typing import List
import jiwer
import jiwer.transforms as tr
from packaging import version
import datasets
from datasets.config import PY_VERSION
if PY_VERSION < version.parse('3.8'):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
SCREAMING_SNAKE_CASE_ = ''
if version.parse(importlib_metadata.version('jiwer')) < version.parse('2.3.0'):
class snake_case_ ( tr.AbstractTransform ):
"""simple docstring"""
def __init__( self , lowerCamelCase_ = " ") -> List[str]:
UpperCamelCase = sentence_delimiter
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Tuple:
return list(lowerCamelCase_)
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Optional[Any]:
UpperCamelCase = []
for sent_idx, sentence in enumerate(lowerCamelCase_):
chars.extend(self.process_string(lowerCamelCase_))
if self.sentence_delimiter is not None and self.sentence_delimiter != "" and sent_idx < len(lowerCamelCase_) - 1:
chars.append(self.sentence_delimiter)
return chars
SCREAMING_SNAKE_CASE_ = tr.Compose(
[tr.RemoveMultipleSpaces(), tr.Strip(), SentencesToListOfCharacters(SENTENCE_DELIMITER)]
)
else:
SCREAMING_SNAKE_CASE_ = tr.Compose(
[
tr.RemoveMultipleSpaces(),
tr.Strip(),
tr.ReduceToSingleSentence(SENTENCE_DELIMITER),
tr.ReduceToListOfListOfChars(),
]
)
SCREAMING_SNAKE_CASE_ = '\\n@inproceedings{inproceedings,\n author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n year = {2004},\n month = {01},\n pages = {},\n title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n'
SCREAMING_SNAKE_CASE_ = '\\nCharacter error rate (CER) is a common metric of the performance of an automatic speech recognition system.\n\nCER is similar to Word Error Rate (WER), but operates on character instead of word. Please refer to docs of WER for further information.\n\nCharacter error rate can be computed as:\n\nCER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct characters,\nN is the number of characters in the reference (N=S+D+C).\n\nCER\'s output is not always a number between 0 and 1, in particular when there is a high number of insertions. This value is often associated to the percentage of characters that were incorrectly predicted. The lower the value, the better the\nperformance of the ASR system with a CER of 0 being a perfect score.\n'
SCREAMING_SNAKE_CASE_ = '\nComputes CER score of transcribed segments against references.\nArgs:\n references: list of references for each speech input.\n predictions: list of transcribtions to score.\n concatenate_texts: Whether or not to concatenate sentences before evaluation, set to True for more accurate result.\nReturns:\n (float): the character error rate\n\nExamples:\n\n >>> predictions = ["this is the prediction", "there is an other sample"]\n >>> references = ["this is the reference", "there is another one"]\n >>> cer = datasets.load_metric("cer")\n >>> cer_score = cer.compute(predictions=predictions, references=references)\n >>> print(cer_score)\n 0.34146341463414637\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class snake_case_ ( datasets.Metric ):
"""simple docstring"""
def UpperCAmelCase__ ( self) -> Dict:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence'''),
'''references''': datasets.Value('''string''' , id='''sequence'''),
}) , codebase_urls=['''https://github.com/jitsi/jiwer/'''] , reference_urls=[
'''https://en.wikipedia.org/wiki/Word_error_rate''',
'''https://sites.google.com/site/textdigitisation/qualitymeasures/computingerrorrates''',
] , )
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_=False) -> List[Any]:
if concatenate_texts:
return jiwer.compute_measures(
lowerCamelCase_ , lowerCamelCase_ , truth_transform=lowerCamelCase_ , hypothesis_transform=lowerCamelCase_ , )["wer"]
UpperCamelCase = 0
UpperCamelCase = 0
for prediction, reference in zip(lowerCamelCase_ , lowerCamelCase_):
UpperCamelCase = jiwer.compute_measures(
lowerCamelCase_ , lowerCamelCase_ , truth_transform=lowerCamelCase_ , hypothesis_transform=lowerCamelCase_ , )
incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
total += measures["substitutions"] + measures["deletions"] + measures["hits"]
return incorrect / total | 34 | 0 |
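# Sketch of the same accumulation using jiwer directly (older jiwer releases expose
# compute_measures). Note: without the character-level transforms passed in the metric
# above, this operates on words, so the single pair below yields WER-style counts:
import jiwer

measures = jiwer.compute_measures("this is the reference", "this is the prediction")
incorrect = measures["substitutions"] + measures["deletions"] + measures["insertions"]
total = measures["substitutions"] + measures["deletions"] + measures["hits"]
print(incorrect / total)  # word-level error rate for this single pair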
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase = logging.get_logger(__name__)
lowerCamelCase = {
"""google/mobilenet_v1_1.0_224""": """https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json""",
"""google/mobilenet_v1_0.75_192""": """https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json""",
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
}
class _UpperCamelCase ( lowerCamelCase_ ):
'''simple docstring'''
lowerCAmelCase__ = """mobilenet_v1"""
def __init__( self : str , _lowerCAmelCase : Union[str, Any]=3 , _lowerCAmelCase : Optional[int]=2_2_4 , _lowerCAmelCase : Tuple=1.0 , _lowerCAmelCase : str=8 , _lowerCAmelCase : int="relu6" , _lowerCAmelCase : Tuple=True , _lowerCAmelCase : Union[str, Any]=0.999 , _lowerCAmelCase : Dict=0.02 , _lowerCAmelCase : str=0.001 , **_lowerCAmelCase : Optional[Any] , ):
'''simple docstring'''
super().__init__(**lowerCamelCase_)
if depth_multiplier <= 0:
raise ValueError('depth_multiplier must be greater than zero.')
__lowercase =num_channels
__lowercase =image_size
__lowercase =depth_multiplier
__lowercase =min_depth
__lowercase =hidden_act
__lowercase =tf_padding
__lowercase =classifier_dropout_prob
__lowercase =initializer_range
__lowercase =layer_norm_eps
class _UpperCamelCase ( lowerCamelCase_ ):
'''simple docstring'''
lowerCAmelCase__ = version.parse("""1.11""" )
@property
def __lowerCamelCase ( self : str):
'''simple docstring'''
return OrderedDict([('pixel_values', {0: 'batch'})])
@property
def __lowerCamelCase ( self : List[str]):
'''simple docstring'''
if self.task == "image-classification":
return OrderedDict([('logits', {0: 'batch'})])
else:
return OrderedDict([('last_hidden_state', {0: 'batch'}), ('pooler_output', {0: 'batch'})])
@property
def __lowerCamelCase ( self : Tuple):
'''simple docstring'''
return 1e-4
| 474 |
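# Quick usage sketch (assumes the transformers package; mirrors the config class above):
from transformers import MobileNetV1Config

config = MobileNetV1Config(depth_multiplier=0.75, image_size=192)
print(config.model_type)        # mobilenet_v1
print(config.depth_multiplier)  # 0.75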
"""simple docstring"""
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ = {'vocab_file': 'spiece.model'}
SCREAMING_SNAKE_CASE_ = {
'vocab_file': {
'xlnet-base-cased': 'https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model',
'xlnet-large-cased': 'https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model',
}
}
SCREAMING_SNAKE_CASE_ = {
'xlnet-base-cased': None,
'xlnet-large-cased': None,
}
# Segments (not really needed)
SCREAMING_SNAKE_CASE_ = 0
SCREAMING_SNAKE_CASE_ = 1
SCREAMING_SNAKE_CASE_ = 2
SCREAMING_SNAKE_CASE_ = 3
SCREAMING_SNAKE_CASE_ = 4
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
A_ = VOCAB_FILES_NAMES
A_ = PRETRAINED_VOCAB_FILES_MAP
A_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A_ = '''left'''
def __init__( self , lowerCamelCase_ , lowerCamelCase_=False , lowerCamelCase_=True , lowerCamelCase_=False , lowerCamelCase_="<s>" , lowerCamelCase_="</s>" , lowerCamelCase_="<unk>" , lowerCamelCase_="<sep>" , lowerCamelCase_="<pad>" , lowerCamelCase_="<cls>" , lowerCamelCase_="<mask>" , lowerCamelCase_=["<eop>", "<eod>"] , lowerCamelCase_ = None , **lowerCamelCase_ , ) -> None:
        # Mask token behaves like a normal word, i.e. includes the space before it
UpperCamelCase = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_) if isinstance(lowerCamelCase_ , lowerCamelCase_) else mask_token
UpperCamelCase = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=lowerCamelCase_ , remove_space=lowerCamelCase_ , keep_accents=lowerCamelCase_ , bos_token=lowerCamelCase_ , eos_token=lowerCamelCase_ , unk_token=lowerCamelCase_ , sep_token=lowerCamelCase_ , pad_token=lowerCamelCase_ , cls_token=lowerCamelCase_ , mask_token=lowerCamelCase_ , additional_special_tokens=lowerCamelCase_ , sp_model_kwargs=self.sp_model_kwargs , **lowerCamelCase_ , )
UpperCamelCase = 3
UpperCamelCase = do_lower_case
UpperCamelCase = remove_space
UpperCamelCase = keep_accents
UpperCamelCase = vocab_file
UpperCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(lowerCamelCase_)
@property
def UpperCAmelCase__ ( self) -> List[str]:
return len(self.sp_model)
def UpperCAmelCase__ ( self) -> Tuple:
UpperCamelCase = {self.convert_ids_to_tokens(lowerCamelCase_): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder)
return vocab
def __getstate__( self) -> Any:
UpperCamelCase = self.__dict__.copy()
UpperCamelCase = None
return state
def __setstate__( self , lowerCamelCase_) -> str:
UpperCamelCase = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs'''):
UpperCamelCase = {}
UpperCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(self.vocab_file)
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Union[str, Any]:
if self.remove_space:
UpperCamelCase = ''' '''.join(inputs.strip().split())
else:
UpperCamelCase = inputs
UpperCamelCase = outputs.replace('''``''' , '''"''').replace('''\'\'''' , '''"''')
if not self.keep_accents:
UpperCamelCase = unicodedata.normalize('''NFKD''' , lowerCamelCase_)
UpperCamelCase = ''''''.join([c for c in outputs if not unicodedata.combining(lowerCamelCase_)])
if self.do_lower_case:
UpperCamelCase = outputs.lower()
return outputs
def UpperCAmelCase__ ( self , lowerCamelCase_) -> List[str]:
UpperCamelCase = self.preprocess_text(lowerCamelCase_)
UpperCamelCase = self.sp_model.encode(lowerCamelCase_ , out_type=lowerCamelCase_)
UpperCamelCase = []
for piece in pieces:
if len(lowerCamelCase_) > 1 and piece[-1] == str(''',''') and piece[-2].isdigit():
UpperCamelCase = self.sp_model.EncodeAsPieces(piece[:-1].replace(lowerCamelCase_ , ''''''))
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0]) == 1:
UpperCamelCase = cur_pieces[1:]
else:
UpperCamelCase = cur_pieces[0][1:]
cur_pieces.append(piece[-1])
new_pieces.extend(lowerCamelCase_)
else:
new_pieces.append(lowerCamelCase_)
return new_pieces
def UpperCAmelCase__ ( self , lowerCamelCase_) -> int:
return self.sp_model.PieceToId(lowerCamelCase_)
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Optional[int]:
return self.sp_model.IdToPiece(lowerCamelCase_)
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Dict:
UpperCamelCase = ''''''.join(lowerCamelCase_).replace(lowerCamelCase_ , ''' ''').strip()
return out_string
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ = False , lowerCamelCase_ = None , lowerCamelCase_ = True , **lowerCamelCase_ , ) -> str:
UpperCamelCase = kwargs.pop('''use_source_tokenizer''' , lowerCamelCase_)
UpperCamelCase = self.convert_ids_to_tokens(lowerCamelCase_ , skip_special_tokens=lowerCamelCase_)
        # To avoid mixing byte-level and unicode for byte-level BPE
# we need to build string separately for added tokens and byte-level tokens
# cf. https://github.com/huggingface/transformers/issues/1133
UpperCamelCase = []
UpperCamelCase = []
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_ids:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(lowerCamelCase_))
UpperCamelCase = []
sub_texts.append(lowerCamelCase_)
else:
current_sub_text.append(lowerCamelCase_)
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(lowerCamelCase_))
# Mimic the behavior of the Rust tokenizer:
# By default, there are no spaces between special tokens
UpperCamelCase = ''''''.join(lowerCamelCase_)
UpperCamelCase = (
clean_up_tokenization_spaces
if clean_up_tokenization_spaces is not None
else self.clean_up_tokenization_spaces
)
if clean_up_tokenization_spaces:
UpperCamelCase = self.clean_up_tokenization(lowerCamelCase_)
return clean_text
else:
return text
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ = None) -> List[int]:
UpperCamelCase = [self.sep_token_id]
UpperCamelCase = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ = None , lowerCamelCase_ = False) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCamelCase_ , token_ids_a=lowerCamelCase_ , already_has_special_tokens=lowerCamelCase_)
if token_ids_a is not None:
return ([0] * len(lowerCamelCase_)) + [1] + ([0] * len(lowerCamelCase_)) + [1, 1]
return ([0] * len(lowerCamelCase_)) + [1, 1]
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ = None) -> List[int]:
UpperCamelCase = [self.sep_token_id]
UpperCamelCase = [2]
if token_ids_a is None:
return len(token_ids_a + sep) * [0] + cls_segment_id
return len(token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1] + cls_segment_id
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ = None) -> Tuple[str]:
if not os.path.isdir(lowerCamelCase_):
logger.error(F'Vocabulary path ({save_directory}) should be a directory')
return
UpperCamelCase = os.path.join(
lowerCamelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''])
if os.path.abspath(self.vocab_file) != os.path.abspath(lowerCamelCase_) and os.path.isfile(self.vocab_file):
copyfile(self.vocab_file , lowerCamelCase_)
elif not os.path.isfile(self.vocab_file):
with open(lowerCamelCase_ , '''wb''') as fi:
UpperCamelCase = self.sp_model.serialized_model_proto()
fi.write(lowerCamelCase_)
return (out_vocab_file,) | 34 | 0 |
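# Usage sketch for the special-token layout implemented above: unlike BERT, XLNet puts
# <sep> and then <cls> at the END of the sequence (and pads on the left). Assumes
# transformers is installed and the xlnet-base-cased checkpoint is reachable:
from transformers import XLNetTokenizer

tok = XLNetTokenizer.from_pretrained("xlnet-base-cased")
print(tok.decode(tok.encode("Hello world")))  # roughly: 'Hello world<sep><cls>'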
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_squeezebert import SqueezeBertTokenizer
SCREAMING_SNAKE_CASE_:Optional[int] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_:Optional[int] = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
SCREAMING_SNAKE_CASE_:Dict = {
"""vocab_file""": {
"""squeezebert/squeezebert-uncased""": (
"""https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt"""
),
"""squeezebert/squeezebert-mnli""": """https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt""",
"""squeezebert/squeezebert-mnli-headless""": (
"""https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""squeezebert/squeezebert-uncased""": (
"""https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json"""
),
"""squeezebert/squeezebert-mnli""": (
"""https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json"""
),
"""squeezebert/squeezebert-mnli-headless""": (
"""https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json"""
),
},
}
SCREAMING_SNAKE_CASE_:Optional[int] = {
"""squeezebert/squeezebert-uncased""": 512,
"""squeezebert/squeezebert-mnli""": 512,
"""squeezebert/squeezebert-mnli-headless""": 512,
}
SCREAMING_SNAKE_CASE_:Union[str, Any] = {
"""squeezebert/squeezebert-uncased""": {"""do_lower_case""": True},
"""squeezebert/squeezebert-mnli""": {"""do_lower_case""": True},
"""squeezebert/squeezebert-mnli-headless""": {"""do_lower_case""": True},
}
class SCREAMING_SNAKE_CASE__ ( lowerCamelCase_ ):
'''simple docstring'''
__lowerCamelCase : Dict = VOCAB_FILES_NAMES
__lowerCamelCase : Dict = PRETRAINED_VOCAB_FILES_MAP
__lowerCamelCase : Tuple = PRETRAINED_INIT_CONFIGURATION
__lowerCamelCase : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCamelCase : str = SqueezeBertTokenizer
def __init__( self, lowerCamelCase__=None, lowerCamelCase__=None, lowerCamelCase__=True, lowerCamelCase__="[UNK]", lowerCamelCase__="[SEP]", lowerCamelCase__="[PAD]", lowerCamelCase__="[CLS]", lowerCamelCase__="[MASK]", lowerCamelCase__=True, lowerCamelCase__=None, **lowerCamelCase__, ):
super().__init__(
lowerCamelCase_, tokenizer_file=lowerCamelCase_, do_lower_case=lowerCamelCase_, unk_token=lowerCamelCase_, sep_token=lowerCamelCase_, pad_token=lowerCamelCase_, cls_token=lowerCamelCase_, mask_token=lowerCamelCase_, tokenize_chinese_chars=lowerCamelCase_, strip_accents=lowerCamelCase_, **lowerCamelCase_, )
A : str = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("""lowercase""", lowerCamelCase_ ) != do_lower_case
or normalizer_state.get("""strip_accents""", lowerCamelCase_ ) != strip_accents
or normalizer_state.get("""handle_chinese_chars""", lowerCamelCase_ ) != tokenize_chinese_chars
):
A : Optional[Any] = getattr(lowerCamelCase_, normalizer_state.pop("""type""" ) )
A : Dict = do_lower_case
A : Any = strip_accents
A : int = tokenize_chinese_chars
A : Any = normalizer_class(**lowerCamelCase_ )
A : List[str] = do_lower_case
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__=None ):
A : Tuple = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__ = None ):
A : str = [self.sep_token_id]
A : str = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__ = None ):
A : List[Any] = self._tokenizer.model.save(lowerCamelCase_, name=lowerCamelCase_ )
return tuple(lowerCamelCase_ )
| 662 |
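# Usage sketch for the fast tokenizer above (checkpoint name comes from the maps above;
# assumes transformers is installed and the hub is reachable):
from transformers import SqueezeBertTokenizerFast

tok = SqueezeBertTokenizerFast.from_pretrained("squeezebert/squeezebert-uncased")
enc = tok("first segment", "second segment")
# token_type_ids: 0 for "[CLS] first segment [SEP]", 1 for "second segment [SEP]",
# matching create_token_type_ids_from_sequences above
print(enc["token_type_ids"])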
"""simple docstring"""
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ = {'vocab_file': 'vocab.txt'}
SCREAMING_SNAKE_CASE_ = {
'vocab_file': {
'openbmb/cpm-ant-10b': 'https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt',
},
}
SCREAMING_SNAKE_CASE_ = {
'openbmb/cpm-ant-10b': 1024,
}
def __snake_case ( _lowercase ):
"""simple docstring"""
UpperCamelCase = collections.OrderedDict()
with open(_lowercase ,'''r''' ,encoding='''utf-8''' ) as reader:
UpperCamelCase = reader.readlines()
for index, token in enumerate(_lowercase ):
UpperCamelCase = token.rstrip('''\n''' )
UpperCamelCase = index
return vocab
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
def __init__( self , lowerCamelCase_ , lowerCamelCase_="<unk>" , lowerCamelCase_=2_0_0) -> Any:
UpperCamelCase = vocab
UpperCamelCase = unk_token
UpperCamelCase = max_input_chars_per_word
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Union[str, Any]:
UpperCamelCase = list(lowerCamelCase_)
if len(lowerCamelCase_) > self.max_input_chars_per_word:
return [self.unk_token]
UpperCamelCase = 0
UpperCamelCase = []
while start < len(lowerCamelCase_):
UpperCamelCase = len(lowerCamelCase_)
UpperCamelCase = None
while start < end:
UpperCamelCase = ''''''.join(chars[start:end])
if substr in self.vocab:
UpperCamelCase = substr
break
end -= 1
if cur_substr is None:
sub_tokens.append(self.unk_token)
start += 1
else:
sub_tokens.append(lowerCamelCase_)
UpperCamelCase = end
return sub_tokens
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
A_ = VOCAB_FILES_NAMES
A_ = PRETRAINED_VOCAB_FILES_MAP
A_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A_ = ['''input_ids''', '''attention_mask''']
A_ = False
def __init__( self , lowerCamelCase_ , lowerCamelCase_="<d>" , lowerCamelCase_="</d>" , lowerCamelCase_="<s>" , lowerCamelCase_="</s>" , lowerCamelCase_="<pad>" , lowerCamelCase_="<unk>" , lowerCamelCase_="</n>" , lowerCamelCase_="</_>" , lowerCamelCase_="left" , **lowerCamelCase_ , ) -> List[str]:
requires_backends(self , ['''jieba'''])
super().__init__(
bod_token=lowerCamelCase_ , eod_token=lowerCamelCase_ , bos_token=lowerCamelCase_ , eos_token=lowerCamelCase_ , pad_token=lowerCamelCase_ , unk_token=lowerCamelCase_ , line_token=lowerCamelCase_ , space_token=lowerCamelCase_ , padding_side=lowerCamelCase_ , **lowerCamelCase_ , )
UpperCamelCase = bod_token
UpperCamelCase = eod_token
UpperCamelCase = load_vocab(lowerCamelCase_)
UpperCamelCase = self.encoder[space_token]
UpperCamelCase = self.encoder[line_token]
del self.encoder[space_token]
del self.encoder[line_token]
UpperCamelCase = collections.OrderedDict(sorted(self.encoder.items() , key=lambda lowerCamelCase_: x[1]))
UpperCamelCase = {v: k for k, v in self.encoder.items()}
UpperCamelCase = WordpieceTokenizer(vocab=self.encoder , unk_token=self.unk_token)
@property
def UpperCAmelCase__ ( self) -> Dict:
return self.encoder[self.bod_token]
@property
def UpperCAmelCase__ ( self) -> str:
return self.encoder[self.eod_token]
@property
def UpperCAmelCase__ ( self) -> List[Any]:
return self.encoder["\n"]
@property
def UpperCAmelCase__ ( self) -> int:
return len(self.encoder)
def UpperCAmelCase__ ( self) -> Dict:
return dict(self.encoder , **self.added_tokens_encoder)
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Any:
UpperCamelCase = []
for x in jieba.cut(lowerCamelCase_ , cut_all=lowerCamelCase_):
output_tokens.extend(self.wordpiece_tokenizer.tokenize(lowerCamelCase_))
return output_tokens
def UpperCAmelCase__ ( self , lowerCamelCase_ , **lowerCamelCase_) -> Tuple:
UpperCamelCase = [i for i in token_ids if i >= 0]
UpperCamelCase = [
x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
]
return super()._decode(lowerCamelCase_ , **lowerCamelCase_)
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Dict:
return token in self.encoder
def UpperCAmelCase__ ( self , lowerCamelCase_) -> str:
return "".join(lowerCamelCase_)
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Optional[int]:
return self.encoder.get(lowerCamelCase_ , self.encoder.get(self.unk_token))
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Dict:
return self.decoder.get(lowerCamelCase_ , self.unk_token)
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ = None) -> Tuple[str]:
if os.path.isdir(lowerCamelCase_):
UpperCamelCase = os.path.join(
lowerCamelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''])
else:
UpperCamelCase = (filename_prefix + '''-''' if filename_prefix else '''''') + save_directory
UpperCamelCase = 0
if " " in self.encoder:
UpperCamelCase = self.encoder[''' ''']
del self.encoder[" "]
if "\n" in self.encoder:
UpperCamelCase = self.encoder['''\n''']
del self.encoder["\n"]
UpperCamelCase = collections.OrderedDict(sorted(self.encoder.items() , key=lambda lowerCamelCase_: x[1]))
with open(lowerCamelCase_ , '''w''' , encoding='''utf-8''') as writer:
for token, token_index in self.encoder.items():
if index != token_index:
logger.warning(
F'Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.'
''' Please check that the vocabulary is not corrupted!''')
UpperCamelCase = token_index
writer.write(token + '''\n''')
index += 1
return (vocab_file,)
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ = None) -> List[int]:
if token_ids_a is None:
return [self.bos_token_id] + token_ids_a
return [self.bos_token_id] + token_ids_a + [self.bos_token_id] + token_ids_a
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ = None , lowerCamelCase_ = False) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCamelCase_ , token_ids_a=lowerCamelCase_ , already_has_special_tokens=lowerCamelCase_)
if token_ids_a is not None:
return [1] + ([0] * len(lowerCamelCase_)) + [1] + ([0] * len(lowerCamelCase_))
return [1] + ([0] * len(lowerCamelCase_)) | 34 | 0 |
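# De-obfuscated sketch of the greedy longest-match loop in the WordpieceTokenizer above
# (function and variable names are illustrative):
def wordpiece_tokenize_sketch(token, vocab, unk_token="<unk>", max_input_chars_per_word=200):
    chars = list(token)
    if len(chars) > max_input_chars_per_word:
        return [unk_token]
    sub_tokens, start = [], 0
    while start < len(chars):
        end = len(chars)
        cur_substr = None
        while start < end:  # shrink the window from the right until a vocab hit
            substr = "".join(chars[start:end])
            if substr in vocab:
                cur_substr = substr
                break
            end -= 1
        if cur_substr is None:  # no prefix matched: emit <unk> and advance one char
            sub_tokens.append(unk_token)
            start += 1
        else:
            sub_tokens.append(cur_substr)
            start = end
    return sub_tokens

assert wordpiece_tokenize_sketch("abcd", {"ab", "c"}) == ["ab", "c", "<unk>"]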
import argparse
from tax import checkpoints
from transformers import AutoConfig, FlaxAutoModelForSeqaSeqLM
def _snake_case (__lowercase , __lowercase , __lowercase):
UpperCamelCase_ = AutoConfig.from_pretrained(_lowercase)
UpperCamelCase_ = FlaxAutoModelForSeqaSeqLM.from_config(config=_lowercase)
UpperCamelCase_ = checkpoints.load_tax_checkpoint(_lowercase)
UpperCamelCase_ = 'wi_0' in tax_model['target']['encoder']['layers_0']['mlp']
if config.model_type == "t5":
UpperCamelCase_ = 'SelfAttention'
if config.model_type == "longt5" and config.encoder_attention_type == "local":
UpperCamelCase_ = 'LocalSelfAttention'
elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
UpperCamelCase_ = 'TransientGlobalSelfAttention'
else:
        raise ValueError(
            'Given config is expected to have `model_type=\'t5\'`, or `model_type=\'longt5\'` with `encoder_attention_type`'
            ' attribute with a value from [\'local\', \'transient-global\'].')
# Encoder
for layer_index in range(config.num_layers):
UpperCamelCase_ = f"""layers_{str(_lowercase)}"""
# Self-Attention
UpperCamelCase_ = tax_model['target']['encoder'][layer_name]['attention']['key']['kernel']
UpperCamelCase_ = tax_model['target']['encoder'][layer_name]['attention']['out']['kernel']
UpperCamelCase_ = tax_model['target']['encoder'][layer_name]['attention']['query']['kernel']
UpperCamelCase_ = tax_model['target']['encoder'][layer_name]['attention']['value']['kernel']
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
UpperCamelCase_ = tax_model['target']['encoder'][layer_name]['attention']['T5LayerNorm_0']['scale']
# Layer Normalization
UpperCamelCase_ = tax_model['target']['encoder'][layer_name]['pre_attention_layer_norm']['scale']
if split_mlp_wi:
UpperCamelCase_ = tax_model['target']['encoder'][layer_name]['mlp']['wi_0']['kernel']
UpperCamelCase_ = tax_model['target']['encoder'][layer_name]['mlp']['wi_1']['kernel']
else:
UpperCamelCase_ = tax_model['target']['encoder'][layer_name]['mlp']['wi']['kernel']
UpperCamelCase_ = tax_model['target']['encoder'][layer_name]['mlp']['wo']['kernel']
# Layer Normalization
UpperCamelCase_ = tax_model['target']['encoder'][layer_name]['pre_mlp_layer_norm']['scale']
# Assigning
UpperCamelCase_ = flax_model.params['encoder']['block'][str(_lowercase)]['layer']
UpperCamelCase_ = tax_attention_key
UpperCamelCase_ = tax_attention_out
UpperCamelCase_ = tax_attention_query
UpperCamelCase_ = tax_attention_value
UpperCamelCase_ = tax_attention_layer_norm
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
UpperCamelCase_ = tax_global_layer_norm
if split_mlp_wi:
UpperCamelCase_ = tax_mlp_wi_a
UpperCamelCase_ = tax_mlp_wi_a
else:
UpperCamelCase_ = tax_mlp_wi
UpperCamelCase_ = tax_mlp_wo
UpperCamelCase_ = tax_mlp_layer_norm
UpperCamelCase_ = flax_model_encoder_layer_block
# Only for layer 0:
UpperCamelCase_ = tax_model['target']['encoder']['relpos_bias']['rel_embedding'].T
UpperCamelCase_ = tax_encoder_rel_embedding
# Side/global relative position_bias + layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
UpperCamelCase_ = tax_model['target']['encoder']['side_relpos_bias']['rel_embedding'].T
UpperCamelCase_ = tax_encoder_global_rel_embedding
# Assigning
UpperCamelCase_ = tax_model['target']['encoder']['encoder_norm']['scale']
UpperCamelCase_ = tax_encoder_norm
# Decoder
    for layer_index in range(config.num_layers):
        layer_name = f"layers_{str(layer_index)}"

        # Self-Attention
        tax_attention_key = tax_model['target']['decoder'][layer_name]['self_attention']['key']['kernel']
        tax_attention_out = tax_model['target']['decoder'][layer_name]['self_attention']['out']['kernel']
        tax_attention_query = tax_model['target']['decoder'][layer_name]['self_attention']['query']['kernel']
        tax_attention_value = tax_model['target']['decoder'][layer_name]['self_attention']['value']['kernel']

        # Layer Normalization
        tax_pre_attention_layer_norm = tax_model['target']['decoder'][layer_name]['pre_self_attention_layer_norm'][
            'scale'
        ]

        # Encoder-Decoder-Attention
        tax_enc_dec_attention_module = tax_model['target']['decoder'][layer_name]['encoder_decoder_attention']
        tax_enc_dec_attention_key = tax_enc_dec_attention_module['key']['kernel']
        tax_enc_dec_attention_out = tax_enc_dec_attention_module['out']['kernel']
        tax_enc_dec_attention_query = tax_enc_dec_attention_module['query']['kernel']
        tax_enc_dec_attention_value = tax_enc_dec_attention_module['value']['kernel']

        # Layer Normalization
        tax_cross_layer_norm = tax_model['target']['decoder'][layer_name]['pre_cross_attention_layer_norm']['scale']

        # MLP
        if split_mlp_wi:
            tax_mlp_wi_0 = tax_model['target']['decoder'][layer_name]['mlp']['wi_0']['kernel']
            tax_mlp_wi_1 = tax_model['target']['decoder'][layer_name]['mlp']['wi_1']['kernel']
        else:
            tax_mlp_wi = tax_model['target']['decoder'][layer_name]['mlp']['wi']['kernel']
        tax_mlp_wo = tax_model['target']['decoder'][layer_name]['mlp']['wo']['kernel']

        # Layer Normalization
        tax_mlp_layer_norm = tax_model['target']['decoder'][layer_name]['pre_mlp_layer_norm']['scale']

        # Assigning (target paths follow the FlaxT5/FlaxLongT5 module layout)
        flax_model_decoder_layer_block = flax_model.params['decoder']['block'][str(layer_index)]['layer']
        flax_model_decoder_layer_block['0']['SelfAttention']['k']['kernel'] = tax_attention_key
        flax_model_decoder_layer_block['0']['SelfAttention']['o']['kernel'] = tax_attention_out
        flax_model_decoder_layer_block['0']['SelfAttention']['q']['kernel'] = tax_attention_query
        flax_model_decoder_layer_block['0']['SelfAttention']['v']['kernel'] = tax_attention_value
        flax_model_decoder_layer_block['0']['layer_norm']['weight'] = tax_pre_attention_layer_norm
        flax_model_decoder_layer_block['1']['EncDecAttention']['k']['kernel'] = tax_enc_dec_attention_key
        flax_model_decoder_layer_block['1']['EncDecAttention']['o']['kernel'] = tax_enc_dec_attention_out
        flax_model_decoder_layer_block['1']['EncDecAttention']['q']['kernel'] = tax_enc_dec_attention_query
        flax_model_decoder_layer_block['1']['EncDecAttention']['v']['kernel'] = tax_enc_dec_attention_value
        flax_model_decoder_layer_block['1']['layer_norm']['weight'] = tax_cross_layer_norm
        if split_mlp_wi:
            flax_model_decoder_layer_block['2']['DenseReluDense']['wi_0']['kernel'] = tax_mlp_wi_0
            flax_model_decoder_layer_block['2']['DenseReluDense']['wi_1']['kernel'] = tax_mlp_wi_1
        else:
            flax_model_decoder_layer_block['2']['DenseReluDense']['wi']['kernel'] = tax_mlp_wi
        flax_model_decoder_layer_block['2']['DenseReluDense']['wo']['kernel'] = tax_mlp_wo
        flax_model_decoder_layer_block['2']['layer_norm']['weight'] = tax_mlp_layer_norm
        flax_model.params['decoder']['block'][str(layer_index)]['layer'] = flax_model_decoder_layer_block

    # Decoder Normalization
    tax_decoder_norm = tax_model['target']['decoder']['decoder_norm']['scale']
    flax_model.params['decoder']['final_layer_norm']['weight'] = tax_decoder_norm

    # Only for layer 0:
    tax_decoder_rel_embedding = tax_model['target']['decoder']['relpos_bias']['rel_embedding'].T
    flax_model.params['decoder']['relpos_bias']['rel_attention_bias']['embedding'] = tax_decoder_rel_embedding

    # Token Embeddings
    tax_token_embeddings = tax_model['target']['token_embedder']['embedding']
    flax_model.params['shared']['embedding'] = tax_token_embeddings

    # LM Head (only in v1.1 and LongT5 checkpoints)
    if "logits_dense" in tax_model["target"]["decoder"]:
        flax_model.params['lm_head']['kernel'] = tax_model['target']['decoder']['logits_dense']['kernel']

    flax_model.save_pretrained(flax_dump_folder_path)
    print('T5X Model was successfully converted!')
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--t5x_checkpoint_path""", default=None, type=str, required=True, help="""Path the T5X checkpoint."""
)
parser.add_argument("""--config_name""", default=None, type=str, required=True, help="""Config name of LongT5/T5 model.""")
parser.add_argument(
"""--flax_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output FLAX model."""
)
    args = parser.parse_args()
    convert_tax_checkpoint_to_flax(args.t5x_checkpoint_path, args.config_name, args.flax_dump_folder_path)
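# Example invocation (the script name and all paths below are illustrative
# placeholders, not taken from this repo):
#   python convert_t5x_checkpoint_to_flax.py \
#       --t5x_checkpoint_path /path/to/t5x/checkpoint_1000000 \
#       --config_name google/long-t5-local-base \
#       --flax_dump_folder_path ./flax_dump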
| 23 |
"""simple docstring"""
from typing import Callable, Dict, Optional, Tuple
import torch
from torch import nn
from torch.distributions import (
AffineTransform,
Distribution,
Independent,
NegativeBinomial,
Normal,
StudentT,
TransformedDistribution,
)
class AffineTransformed(TransformedDistribution):
    """Distribution obtained by applying `y = loc + scale * x` to a base distribution."""

    def __init__(self, base_distribution: Distribution, loc=None, scale=None, event_dim=0) -> None:
        self.scale = 1.0 if scale is None else scale
        self.loc = 0.0 if loc is None else loc

        super().__init__(base_distribution, [AffineTransform(loc=self.loc, scale=self.scale, event_dim=event_dim)])

    @property
    def mean(self):
        """Returns the mean of the distribution."""
        return self.base_dist.mean * self.scale + self.loc

    @property
    def variance(self):
        """Returns the variance of the distribution."""
        return self.base_dist.variance * self.scale**2

    @property
    def stddev(self):
        """Returns the standard deviation of the distribution."""
        return self.variance.sqrt()


class ParameterProjection(nn.Module):
    """Projects network outputs onto the (unconstrained) parameters of a distribution."""

    def __init__(
        self, in_features: int, args_dim: Dict[str, int], domain_map: Callable[..., Tuple[torch.Tensor]], **kwargs
    ) -> None:
        super().__init__(**kwargs)
        self.args_dim = args_dim
        self.proj = nn.ModuleList([nn.Linear(in_features, dim) for dim in args_dim.values()])
        self.domain_map = domain_map

    def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor]:
        params_unbounded = [proj(x) for proj in self.proj]
        return self.domain_map(*params_unbounded)


class LambdaLayer(nn.Module):
    """Wraps a plain function as an `nn.Module`."""

    def __init__(self, function: Callable) -> None:
        super().__init__()
        self.function = function

    def forward(self, x, *args):
        return self.function(x, *args)


class DistributionOutput:
    distribution_class: type
    in_features: int
    args_dim: Dict[str, int]

    def __init__(self, dim: int = 1) -> None:
        self.dim = dim
        self.args_dim = {k: dim * self.args_dim[k] for k in self.args_dim}

    def _base_distribution(self, distr_args) -> Distribution:
        if self.dim == 1:
            return self.distribution_class(*distr_args)
        else:
            return Independent(self.distribution_class(*distr_args), 1)

    def distribution(
        self,
        distr_args,
        loc: Optional[torch.Tensor] = None,
        scale: Optional[torch.Tensor] = None,
    ) -> Distribution:
        distr = self._base_distribution(distr_args)
        if loc is None and scale is None:
            return distr
        else:
            return AffineTransformed(distr, loc=loc, scale=scale, event_dim=self.event_dim)

    @property
    def event_shape(self) -> Tuple:
        return () if self.dim == 1 else (self.dim,)

    @property
    def event_dim(self) -> int:
        return len(self.event_shape)

    @property
    def value_in_support(self) -> float:
        return 0.0

    def get_parameter_projection(self, in_features: int) -> nn.Module:
        return ParameterProjection(
            in_features=in_features, args_dim=self.args_dim, domain_map=LambdaLayer(self.domain_map)
        )

    def domain_map(self, *args: torch.Tensor):
        raise NotImplementedError()

    @staticmethod
    def squareplus(x: torch.Tensor) -> torch.Tensor:
        # Smooth map onto the positive reals, used to constrain raw parameters.
        return (x + torch.sqrt(torch.square(x) + 4.0)) / 2.0


class StudentTOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"df": 1, "loc": 1, "scale": 1}
    distribution_class: type = StudentT

    @classmethod
    def domain_map(cls, df: torch.Tensor, loc: torch.Tensor, scale: torch.Tensor):
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        df = 2.0 + cls.squareplus(df)
        return df.squeeze(-1), loc.squeeze(-1), scale.squeeze(-1)


class NormalOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"loc": 1, "scale": 1}
    distribution_class: type = Normal

    @classmethod
    def domain_map(cls, loc: torch.Tensor, scale: torch.Tensor):
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        return loc.squeeze(-1), scale.squeeze(-1)


class NegativeBinomialOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"total_count": 1, "logits": 1}
    distribution_class: type = NegativeBinomial

    @classmethod
    def domain_map(cls, total_count: torch.Tensor, logits: torch.Tensor):
        total_count = cls.squareplus(total_count)
        return total_count.squeeze(-1), logits.squeeze(-1)

    def _base_distribution(self, distr_args) -> Distribution:
        total_count, logits = distr_args
        if self.dim == 1:
            return self.distribution_class(total_count=total_count, logits=logits)
        else:
            return Independent(self.distribution_class(total_count=total_count, logits=logits), 1)

    # Overwrites the parent class method: the negative binomial is discrete, so we
    # scale the parameters instead of applying an affine transformation.
    def distribution(self, distr_args, loc=None, scale=None) -> Distribution:
        total_count, logits = distr_args
        if scale is not None:
            # See scaling property of Gamma.
            logits += scale.log()
        return self._base_distribution((total_count, logits)) | 34 | 0 |
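# Minimal usage sketch for the classes above (the hidden size, batch size and
# seed below are illustrative assumptions, not taken from the source):
if __name__ == "__main__":
    distr_output = NegativeBinomialOutput(dim=1)
    projection = distr_output.get_parameter_projection(in_features=32)  # hidden size is arbitrary here
    hidden_states = torch.randn(8, 32)
    distr = distr_output.distribution(projection(hidden_states))
    print(distr.sample().shape)  # torch.Size([8])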
import string
def decrypt(message: str) -> None:
    """Print every candidate decryption of a Caesar-enciphered message (brute force)."""
    for key in range(len(string.ascii_uppercase)):
        translated = ""
        for symbol in message:
            if symbol in string.ascii_uppercase:
                num = string.ascii_uppercase.find(symbol)
                num = num - key
                if num < 0:
                    num = num + len(string.ascii_uppercase)
                translated = translated + string.ascii_uppercase[num]
            else:
                translated = translated + symbol
        print(f"Decryption using Key #{key}: {translated}")


def main() -> None:
    message = input("Encrypted message: ")
    message = message.upper()
    decrypt(message)
if __name__ == "__main__":
import doctest
doctest.testmod()
main() | 557 |
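# Illustrative example (not part of the original script): for the ciphertext
# "OLSSV" the loop above prints "HELLO" on the line for key #7.
# decrypt("OLSSV")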
"""simple docstring"""
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(__file__)), 'src'))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='ignore', category=FutureWarning)
def pytest_addoption(parser):
    from diffusers.utils.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from diffusers.utils.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption('--make-reports')
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports) | 34 | 0 |
from __future__ import annotations
RotorPositionT = tuple[int, int, int]
RotorSelectionT = tuple[str, str, str]
# used alphabet --------------------------
# from string.ascii_uppercase
abc = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
# -------------------------- default selection --------------------------
# rotors --------------------------
rotor1 = "EGZWVONAHDCLFQMSIPJBYUKXTR"
rotor2 = "FOBHMDKEXQNRAULPGSJVTYICZW"
rotor3 = "ZJXESIUQLHAVRMDOYGTNFWPBKC"
# reflector --------------------------
reflector = {
"A": "N",
"N": "A",
"B": "O",
"O": "B",
"C": "P",
"P": "C",
"D": "Q",
"Q": "D",
"E": "R",
"R": "E",
"F": "S",
"S": "F",
"G": "T",
"T": "G",
"H": "U",
"U": "H",
"I": "V",
"V": "I",
"J": "W",
"W": "J",
"K": "X",
"X": "K",
"L": "Y",
"Y": "L",
"M": "Z",
"Z": "M",
}
# -------------------------- extra rotors --------------------------
rotor4 = "RMDJXFUWGISLHVTCQNKYPBEZOA"
rotor5 = "SGLCPQWZHKXAREONTFBVIYJUDM"
rotor6 = "HVSICLTYKQUBXDWAJZOMFGPREN"
rotor7 = "RZWQHFMVDBKICJLNTUXAGYPSOE"
rotor8 = "LFKIJODBEGAMQPXVUHYSTCZRWN"
rotor9 = "KOAEGVDHXPQZMLFTYWJNBRCIUS"
def _validator(
    rotpos: RotorPositionT, rotsel: RotorSelectionT, pb: str
) -> tuple[RotorPositionT, RotorSelectionT, dict[str, str]]:
    """Check that the rotor selection and positions are usable, and build the plugboard dict."""
    # Checks if there are 3 unique rotors
    if (unique_rotsel := len(set(rotsel))) < 3:
        msg = f"Please use 3 unique rotors (not {unique_rotsel})"
        raise Exception(msg)

    # Checks if rotor positions are valid
    rotorpos1, rotorpos2, rotorpos3 = rotpos
    if not 0 < rotorpos1 <= len(abc):
        msg = f"First rotor position is not within range of 1..26 ({rotorpos1})"
        raise ValueError(msg)
    if not 0 < rotorpos2 <= len(abc):
        msg = f"Second rotor position is not within range of 1..26 ({rotorpos2})"
        raise ValueError(msg)
    if not 0 < rotorpos3 <= len(abc):
        msg = f"Third rotor position is not within range of 1..26 ({rotorpos3})"
        raise ValueError(msg)

    # Validates string and returns dict
    pbdict = _plugboard(pb)

    return rotpos, rotsel, pbdict
def _plugboard(pbstring: str) -> dict[str, str]:
    """Validate the plugboard string and turn it into a symmetric mapping dict."""
    if not isinstance(pbstring, str):
        msg = f"Plugboard setting isn't type string ({type(pbstring)})"
        raise TypeError(msg)
    elif len(pbstring) % 2 != 0:
        msg = f"Odd number of symbols ({len(pbstring)})"
        raise Exception(msg)
    elif pbstring == "":
        return {}

    pbstring = pbstring.replace(" ", "")  # assignment added: `str.replace` returns a new string

    # Checks if all characters are unique
    tmppbl = set()
    for i in pbstring:
        if i not in abc:
            msg = f"'{i}' not in list of symbols"
            raise Exception(msg)
        elif i in tmppbl:
            msg = f"Duplicate symbol ({i})"
            raise Exception(msg)
        else:
            tmppbl.add(i)
    del tmppbl

    # Created the dictionary
    pb = {}
    for j in range(0, len(pbstring) - 1, 2):
        pb[pbstring[j]] = pbstring[j + 1]
        pb[pbstring[j + 1]] = pbstring[j]

    return pb
def enigma(
    text: str,
    rotor_position: RotorPositionT,
    rotor_selection: RotorSelectionT = (rotor1, rotor2, rotor3),
    plugb: str = "",
) -> str:
    """Encipher (or, symmetrically, decipher) `text` with the given rotor setup."""
    text = text.upper()
    rotor_position, rotor_selection, plugboard = _validator(
        rotor_position, rotor_selection, plugb.upper()
    )

    rotorpos1, rotorpos2, rotorpos3 = rotor_position
    rotor_a, rotor_b, rotor_c = rotor_selection
    rotorpos1 -= 1
    rotorpos2 -= 1
    rotorpos3 -= 1

    result = []

    # encryption/decryption process --------------------------
    for symbol in text:
        if symbol in abc:
            # 1st plugboard --------------------------
            if symbol in plugboard:
                symbol = plugboard[symbol]

            # rotor ra --------------------------
            index = abc.index(symbol) + rotorpos1
            symbol = rotor_a[index % len(abc)]

            # rotor rb --------------------------
            index = abc.index(symbol) + rotorpos2
            symbol = rotor_b[index % len(abc)]

            # rotor rc --------------------------
            index = abc.index(symbol) + rotorpos3
            symbol = rotor_c[index % len(abc)]

            # reflector --------------------------
            # this is the reason you don't need another machine to decipher
            symbol = reflector[symbol]

            # 2nd rotors
            symbol = abc[rotor_c.index(symbol) - rotorpos3]
            symbol = abc[rotor_b.index(symbol) - rotorpos2]
            symbol = abc[rotor_a.index(symbol) - rotorpos1]

            # 2nd plugboard
            if symbol in plugboard:
                symbol = plugboard[symbol]

            # moves/resets rotor positions (odometer-style stepping)
            rotorpos1 += 1
            if rotorpos1 >= len(abc):
                rotorpos1 = 0
                rotorpos2 += 1
            if rotorpos2 >= len(abc):
                rotorpos2 = 0
                rotorpos3 += 1
            if rotorpos3 >= len(abc):
                rotorpos3 = 0

        # else:
        #     pass
        #     Error could be also raised
        #     raise ValueError(
        #        'Invalid symbol('+repr(symbol)+')')

        result.append(symbol)

    return "".join(result)
if __name__ == "__main__":
    message = "This is my Python script that emulates the Enigma machine from WWII."
    rotor_pos = (1, 1, 1)
    pb = "pictures"
    rotor_sel = (rotor2, rotor4, rotor8)  # any three distinct rotors work here
    en = enigma(message, rotor_pos, rotor_sel, pb)

    print("Encrypted message:", en)
    print("Decrypted message:", enigma(en, rotor_pos, rotor_sel, pb))
| 491 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
class MobileViTFeatureExtractor(MobileViTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            'The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
            ' Please use MobileViTImageProcessor instead.',
            FutureWarning,
        )
        super().__init__(*args, **kwargs) | 34 | 0 |
import random
def _partition(data, pivot):
    """Three-way partition of `data` around `pivot`: (less, equal, greater)."""
    less, equal, greater = [], [], []
    for element in data:
        if element < pivot:
            less.append(element)
        elif element > pivot:
            greater.append(element)
        else:
            equal.append(element)
    return less, equal, greater


def quick_select(items, index):
    """Return the `index`-th smallest element of `items` (0-based), or None for bad input."""
    # invalid input
    if index >= len(items) or index < 0:
        return None

    pivot = items[random.randint(0, len(items) - 1)]
    count = 0
    smaller, equal, larger = _partition(items, pivot)
    count = len(equal)
    m = len(smaller)

    # index is the pivot
    if m <= index < m + count:
        return pivot
    # must be in smaller
    elif m > index:
        return quick_select(smaller, index)
    # must be in larger
    else:
        return quick_select(larger, index - (m + count))
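if __name__ == "__main__":
    # Illustrative check (not part of the original module): index 3 asks for the
    # 4th-smallest element, which is 7 for this list.
    print(quick_select([2, 4, 5, 7, 899, 54, 32], 3))  # -> 7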
| 398 |
"""simple docstring"""
def z_function(input_str: str) -> list:
    """For each index i, compute the length of the longest prefix of `input_str`
    that is also a prefix of the suffix starting at i."""
    z_result = [0 for i in range(len(input_str))]

    # initialize interval's left pointer and right pointer
    left_pointer, right_pointer = 0, 0

    for i in range(1, len(input_str)):
        # case when current index is inside the interval
        if i <= right_pointer:
            min_edge = min(right_pointer - i + 1, z_result[i - left_pointer])
            z_result[i] = min_edge

        while go_next(i, z_result, input_str):
            z_result[i] += 1

        # if new index's result gives us more right interval,
        # we've to update left_pointer and right_pointer
        if i + z_result[i] - 1 > right_pointer:
            left_pointer, right_pointer = i, i + z_result[i] - 1

    return z_result


def go_next(i: int, z_result: list, s: str) -> bool:
    """Check if the matched prefix at position i can be extended by one character."""
    return i + z_result[i] < len(s) and s[z_result[i]] == s[i + z_result[i]]


def find_pattern(pattern: str, input_str: str) -> int:
    """Count occurrences of `pattern` in `input_str` using the Z-function."""
    answer = 0
    # concatenate 'pattern' and 'input_str' and call z_function
    # with concatenated string
    z_result = z_function(pattern + input_str)

    for val in z_result:
        # if the value is at least the length of the pattern string,
        # that index is the starting position of a substring
        # equal to the pattern string
        if val >= len(pattern):
            answer += 1

    return answer
if __name__ == "__main__":
import doctest
doctest.testmod() | 34 | 0 |
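if __name__ == "__main__":
    # Illustrative check (not part of the original module): "abr" occurs twice
    # in "abracadabra" (at indices 0 and 7).
    assert find_pattern("abr", "abracadabra") == 2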
'''simple docstring'''
import importlib.metadata
from typing import Union
from packaging.version import Version, parse
from .constants import STR_OPERATION_TO_FUNC
torch_version = parse(importlib.metadata.version("torch"))


def compare_versions(library_or_version: Union[str, Version], operation: str, requirement_version: str) -> bool:
    """Compare a library's installed version (or a given `Version`) against a requirement."""
    if operation not in STR_OPERATION_TO_FUNC.keys():
        raise ValueError(f"`operation` must be one of {list(STR_OPERATION_TO_FUNC.keys() )}, received {operation}" )
    operation = STR_OPERATION_TO_FUNC[operation]
    if isinstance(library_or_version, str):
        library_or_version = parse(importlib.metadata.version(library_or_version))
    return operation(library_or_version, parse(requirement_version))


def is_torch_version(operation: str, version: str) -> bool:
    """Compare the installed torch version against a requirement."""
    return compare_versions(torch_version, operation, version)
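# Illustrative usage (assumes the named packages are installed):
#   is_torch_version(">=", "1.12.0")            -> True on torch >= 1.12
#   compare_versions("packaging", ">", "20.0")  -> bool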
| 150 |
"""simple docstring"""
import importlib.metadata
import warnings
from copy import deepcopy
from packaging import version
from ..utils import logging
from .import_utils import is_accelerate_available, is_bitsandbytes_available
if is_bitsandbytes_available():
import bitsandbytes as bnb
import torch
import torch.nn as nn
    from ..pytorch_utils import Conv1D
if is_accelerate_available():
from accelerate import init_empty_weights
from accelerate.utils import find_tied_parameters
logger = logging.get_logger(__name__)
def set_module_quantized_tensor_to_device(module, tensor_name, device, value=None, fp16_statistics=None):
    """Set a (possibly quantized) tensor on `module` under `tensor_name`, moving it to `device`."""
    if "." in tensor_name:
        splits = tensor_name.split('.')
        for split in splits[:-1]:
            new_module = getattr(module, split)
            if new_module is None:
                raise ValueError(f'{module} has no attribute {split}.')
            module = new_module
        tensor_name = splits[-1]

    if tensor_name not in module._parameters and tensor_name not in module._buffers:
        raise ValueError(f'{module} does not have a parameter or a buffer named {tensor_name}.')
    is_buffer = tensor_name in module._buffers
    old_value = getattr(module, tensor_name)

    if old_value.device == torch.device('meta') and device not in ["meta", torch.device('meta')] and value is None:
        raise ValueError(f'{tensor_name} is on the meta device, we need a `value` to put in on {device}.')

    is_4bit = False
    is_8bit = False
    if is_buffer or not is_bitsandbytes_available():
        is_4bit = False
        is_8bit = False
    else:
        is_4bit = hasattr(bnb.nn, 'Params4bit') and isinstance(module._parameters[tensor_name], bnb.nn.Params4bit)
        is_8bit = isinstance(module._parameters[tensor_name], bnb.nn.Int8Params)

    if is_8bit or is_4bit:
        param = module._parameters[tensor_name]
        if param.device.type != "cuda":
            if value is None:
                new_value = old_value.to(device)
            elif isinstance(value, torch.Tensor):
                new_value = value.to('cpu')
                if value.dtype == torch.int8:
                    is_8bit_serializable = version.parse(importlib.metadata.version('bitsandbytes')) > version.parse(
                        '0.37.2')
                    if not is_8bit_serializable:
                        raise ValueError(
                            'Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. '
                            'Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`.')
            else:
                new_value = torch.tensor(value, device='cpu')

            # Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization.
            # Since weights are saved in the correct "orientation", we skip transposing when loading.
            if issubclass(module.source_cls, Conv1D) and fp16_statistics is None:
                new_value = new_value.T

            kwargs = old_value.__dict__
            if is_8bit:
                new_value = bnb.nn.Int8Params(new_value, requires_grad=False, **kwargs).to(device)
            elif is_4bit:
                new_value = bnb.nn.Params4bit(new_value, requires_grad=False, **kwargs).to(device)

            module._parameters[tensor_name] = new_value
            if fp16_statistics is not None:
                setattr(module.weight, 'SCB', fp16_statistics.to(device))
    else:
        if value is None:
            new_value = old_value.to(device)
        elif isinstance(value, torch.Tensor):
            new_value = value.to(device)
        else:
            new_value = torch.tensor(value, device=device)

        if is_buffer:
            module._buffers[tensor_name] = new_value
        else:
            new_value = nn.Parameter(new_value, requires_grad=old_value.requires_grad)
            module._parameters[tensor_name] = new_value
def _replace_with_bnb_linear(
    model, modules_to_not_convert=None, current_key_name=None, quantization_config=None, has_been_replaced=False
):
    """Recursively replace `nn.Linear`/`Conv1D` modules with bitsandbytes quantized linear layers."""
    for name, module in model.named_children():
        if current_key_name is None:
            current_key_name = []
        current_key_name.append(name)

        if (isinstance(module, nn.Linear) or isinstance(module, Conv1D)) and name not in modules_to_not_convert:
            # Check if the current key is not in the `modules_to_not_convert`
            if not any(key in '.'.join(current_key_name) for key in modules_to_not_convert):
                with init_empty_weights():
                    if isinstance(module, Conv1D):
                        in_features, out_features = module.weight.shape
                    else:
                        in_features = module.in_features
                        out_features = module.out_features

                    if quantization_config.quantization_method() == "llm_int8":
                        model._modules[name] = bnb.nn.Linear8bitLt(
                            in_features,
                            out_features,
                            module.bias is not None,
                            has_fp16_weights=quantization_config.llm_int8_has_fp16_weight,
                            threshold=quantization_config.llm_int8_threshold,
                        )
                        has_been_replaced = True
                    else:
                        if (
                            quantization_config.llm_int8_skip_modules is not None
                            and name in quantization_config.llm_int8_skip_modules
                        ):
                            pass
                        else:
                            model._modules[name] = bnb.nn.Linear4bit(
                                in_features,
                                out_features,
                                module.bias is not None,
                                quantization_config.bnb_4bit_compute_dtype,
                                compress_statistics=quantization_config.bnb_4bit_use_double_quant,
                                quant_type=quantization_config.bnb_4bit_quant_type,
                            )
                            has_been_replaced = True
                    # Store the module class in case we need to transpose the weight later
                    model._modules[name].source_cls = type(module)
                    # Force requires grad to False to avoid unexpected errors
                    model._modules[name].requires_grad_(False)
        if len(list(module.children())) > 0:
            _, has_been_replaced = _replace_with_bnb_linear(
                module, modules_to_not_convert, current_key_name, quantization_config, has_been_replaced=has_been_replaced,
            )
        # Remove the last key for recursion
        current_key_name.pop(-1)
    return model, has_been_replaced
def replace_with_bnb_linear(model, modules_to_not_convert=None, current_key_name=None, quantization_config=None):
    """Public wrapper around `_replace_with_bnb_linear` that warns when nothing was replaced."""
    modules_to_not_convert = ['lm_head'] if modules_to_not_convert is None else modules_to_not_convert
    model, has_been_replaced = _replace_with_bnb_linear(
        model, modules_to_not_convert, current_key_name, quantization_config)
    if not has_been_replaced:
        logger.warning(
            'You are loading your model in 8bit or 4bit but no linear modules were found in your model.'
            ' Please double check your model architecture, or submit an issue on github if you think this is'
            ' a bug.')
    return model
def replace_8bit_linear(*args, **kwargs):
    warnings.warn(
        '`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead',
        FutureWarning,
    )
    return replace_with_bnb_linear(*args, **kwargs)


def set_module_8bit_tensor_to_device(*args, **kwargs):
    warnings.warn(
        '`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use `set_module_quantized_tensor_to_device` instead',
        FutureWarning,
    )
    return set_module_quantized_tensor_to_device(*args, **kwargs)
def get_keys_to_not_convert(model):
    """Return module names (e.g. tied heads) that should stay in full precision."""
    tied_model = deepcopy(model)  # this has 0 cost since it is done inside `init_empty_weights` context manager`
    tied_model.tie_weights()

    tied_params = find_tied_parameters(tied_model)
    # For compatibility with Accelerate < 0.18
    if isinstance(tied_params, dict):
        tied_keys = sum(list(tied_params.values()), []) + list(tied_params.keys())
    else:
        tied_keys = sum(tied_params, [])
    has_tied_params = len(tied_keys) > 0

    # Check if it is a base model
    is_base_model = not hasattr(model, model.base_model_prefix)

    # Ignore this for base models (BertModel, GPT2Model, etc.)
    if (not has_tied_params) and is_base_model:
        return []

    # otherwise they have an attached head
    list_modules = list(model.named_children())
    list_last_module = [list_modules[-1][0]]
    # add last module together with tied weights
    intersection = set(list_last_module) - set(tied_keys)
    list_untouched = list(set(tied_keys)) + list(intersection)

    # remove ".weight" from the keys
    names_to_remove = ['.weight', '.bias']
    filtered_module_names = []
    for name in list_untouched:
        for name_to_remove in names_to_remove:
            if name_to_remove in name:
                name = name.replace(name_to_remove, '')
        filtered_module_names.append(name)

    return filtered_module_names | 34 | 0 |
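# Illustrative usage sketch (assumes a CUDA machine with `bitsandbytes` installed;
# the model name below is an example, not taken from this file):
#
#   from transformers import AutoModelForCausalLM, BitsAndBytesConfig
#
#   quantization_config = BitsAndBytesConfig(load_in_8bit=True)
#   model = AutoModelForCausalLM.from_pretrained("facebook/opt-350m", quantization_config=quantization_config)
#   print(get_keys_to_not_convert(model))  # e.g. ["lm_head"]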
'''simple docstring'''
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import BaseOutput, is_torch_available, is_transformers_available
@dataclass
class SemanticStableDiffusionPipelineOutput(BaseOutput):
    """Output of the semantic Stable Diffusion pipeline: generated images plus NSFW flags."""

    images: Union[List[PIL.Image.Image], np.ndarray]
    nsfw_content_detected: Optional[List[bool]]
if is_transformers_available() and is_torch_available():
from .pipeline_semantic_stable_diffusion import SemanticStableDiffusionPipeline
| 320 |
"""simple docstring"""
from random import randint
from tempfile import TemporaryFile
import numpy as np
def _in_place_quick_sort(a, start, end):
    """Randomized in-place quicksort; returns the number of comparisons made."""
    count = 0
    if start < end:
        pivot = randint(start, end)
        temp = a[end]
        a[end] = a[pivot]
        a[pivot] = temp

        p, count = _in_place_partition(a, start, end)
        count += _in_place_quick_sort(a, start, p - 1)
        count += _in_place_quick_sort(a, p + 1, end)
    return count


def _in_place_partition(a, start, end):
    """Lomuto-style partition around a random pivot; returns (pivot index, comparison count)."""
    count = 0
    pivot = randint(start, end)
    temp = a[end]
    a[end] = a[pivot]
    a[pivot] = temp
    new_pivot_index = start - 1
    for index in range(start, end):
        count += 1
        if a[index] < a[end]:  # check if current val is less than pivot value
            new_pivot_index = new_pivot_index + 1
            temp = a[new_pivot_index]
            a[new_pivot_index] = a[index]
            a[index] = temp

    temp = a[new_pivot_index + 1]
    a[new_pivot_index + 1] = a[end]
    a[end] = temp
    return new_pivot_index + 1, count


outfile = TemporaryFile()
p = 100  # 100 elements are to be sorted
mu, sigma = 0, 1  # mean and standard deviation
X = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print('The array is')
print(X)

outfile.seek(0)  # using the same array
M = np.load(outfile)
r = len(M) - 1
z = _in_place_quick_sort(M, 0, r)

print(
    'No of Comparisons for 100 elements selected from a standard normal distribution '
    'is :'
)
print(z) | 34 | 0 |
'''simple docstring'''
import os
import tempfile
import unittest
import numpy as np
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
@require_flax
class DownloadTests(unittest.TestCase):
    def test_download_only_flax_weights(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            # pipeline has Flax weights
            _ = FlaxDiffusionPipeline.from_pretrained(
                "hf-internal-testing/tiny-stable-diffusion-pipe", safety_checker=None, cache_dir=tmpdirname
            )

            all_root_files = [t[-1] for t in os.walk(os.path.join(tmpdirname, os.listdir(tmpdirname)[0], "snapshots"))]
            files = [item for sublist in all_root_files for item in sublist]

            # None of the downloaded files should be a PyTorch file even if we have some here:
            # https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
            assert not any(f.endswith(".bin") for f in files)
@slow
@require_flax
class FlaxStableDiffusionPipelineSlowTests(unittest.TestCase):
    def test_dummy_all_tpus(self):
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "hf-internal-testing/tiny-stable-diffusion-pipe", safety_checker=None
        )

        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 4

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
        assert images.shape == (num_samples, 1, 64, 64, 3)
        if jax.device_count() == 8:
            assert np.abs(np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 4.1514745) < 1e-3
            assert np.abs(np.abs(images, dtype=np.float32).sum() - 49947.875) < 5e-1

        images_pil = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:])))
        assert len(images_pil) == num_samples

    def test_stable_diffusion_v1_4(self):
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="flax", safety_checker=None
        )

        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.05652401)) < 1e-3
            assert np.abs((np.abs(images, dtype=np.float32).sum() - 2383808.2)) < 5e-1

    def test_stable_diffusion_v1_4_bfloat_16(self):
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="bf16", dtype=jnp.bfloat16, safety_checker=None
        )

        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.04003906)) < 1e-3
            assert np.abs((np.abs(images, dtype=np.float32).sum() - 2373516.75)) < 5e-1

    def test_stable_diffusion_v1_4_bfloat_16_with_safety(self):
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="bf16", dtype=jnp.bfloat16
        )

        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.04003906)) < 1e-3
            assert np.abs((np.abs(images, dtype=np.float32).sum() - 2373516.75)) < 5e-1

    def test_stable_diffusion_v1_4_bfloat_16_ddim(self):
        scheduler = FlaxDDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", set_alpha_to_one=False, steps_offset=1,
        )
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="bf16", dtype=jnp.bfloat16, scheduler=scheduler, safety_checker=None,
        )
        scheduler_state = scheduler.create_state()
        params["scheduler"] = scheduler_state

        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.045043945)) < 1e-3
            assert np.abs((np.abs(images, dtype=np.float32).sum() - 2347693.5)) < 5e-1

    def test_jax_memory_efficient_attention(self):
        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prng_seed = jax.random.split(jax.random.PRNGKey(0), num_samples)

        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="bf16", dtype=jnp.bfloat16, safety_checker=None,
        )

        params = replicate(params)
        prompt_ids = pipeline.prepare_inputs(prompt)
        prompt_ids = shard(prompt_ids)
        images = pipeline(prompt_ids, params, prng_seed, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        slice = images[2, 0, 256, 10:17, 1]

        # With memory efficient attention
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="bf16", dtype=jnp.bfloat16, safety_checker=None, use_memory_efficient_attention=True,
        )

        params = replicate(params)
        prompt_ids = pipeline.prepare_inputs(prompt)
        prompt_ids = shard(prompt_ids)
        images_eff = pipeline(prompt_ids, params, prng_seed, jit=True).images
        assert images_eff.shape == (num_samples, 1, 512, 512, 3)
        slice_eff = images_eff[2, 0, 256, 10:17, 1]  # fixed: take the slice from the efficient-attention output

        # I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
        # over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
        assert abs(slice_eff - slice).max() < 1e-2 | 44 |
"""simple docstring"""
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))

import check_dummies  # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init  # noqa: E402

# Align TRANSFORMERS_PATH in check_dummies with the current path
check_dummies.PATH_TO_TRANSFORMERS = os.path.join(git_repo_path, 'src', 'transformers')

DUMMY_CONSTANT = '\n{0} = None\n'
DUMMY_CLASS = '\nclass {0}(metaclass=DummyObject):\n    _backends = {1}\n\n    def __init__(self, *args, **kwargs):\n        requires_backends(self, {1})\n'
DUMMY_FUNCTION = '\ndef {0}(*args, **kwargs):\n    requires_backends({0}, {1})\n'
class CheckDummiesTester(unittest.TestCase):
    def test_find_backend(self):
        no_backend = find_backend('    _import_structure["models.albert"].append("AlbertTokenizerFast")')
        self.assertIsNone(no_backend)

        simple_backend = find_backend("    if not is_tokenizers_available():")
        self.assertEqual(simple_backend, "tokenizers")

        backend_with_underscore = find_backend("    if not is_tensorflow_text_available():")
        self.assertEqual(backend_with_underscore, "tensorflow_text")

        double_backend = find_backend("    if not (is_sentencepiece_available() and is_tokenizers_available()):")
        self.assertEqual(double_backend, "sentencepiece_and_tokenizers")

        double_backend_with_underscore = find_backend(
            "    if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
        )
        self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")

        triple_backend = find_backend(
            "    if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):"
        )
        self.assertEqual(triple_backend, "sentencepiece_and_tokenizers_and_vision")

    def test_read_init(self):
        objects = read_init()
        # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
        self.assertIn("torch", objects)
        self.assertIn("tensorflow_text", objects)
        self.assertIn("sentencepiece_and_tokenizers", objects)

        # Likewise, we can't assert on the exact content of a key
        self.assertIn("BertModel", objects["torch"])
        self.assertIn("TFBertModel", objects["tf"])
        self.assertIn("FlaxBertModel", objects["flax"])
        self.assertIn("BertModel", objects["torch"])
        self.assertIn("TFBertTokenizer", objects["tensorflow_text"])
        self.assertIn("convert_slow_tokenizer", objects["sentencepiece_and_tokenizers"])

    def test_create_dummy_object(self):
        dummy_constant = create_dummy_object("CONSTANT", "'torch'")
        self.assertEqual(dummy_constant, "\nCONSTANT = None\n")

        dummy_function = create_dummy_object("function", "'torch'")
        self.assertEqual(
            dummy_function, "\ndef function(*args, **kwargs):\n    requires_backends(function, 'torch')\n"
        )

        expected_dummy_class = """
class FakeClass(metaclass=DummyObject):
    _backends = 'torch'

    def __init__(self, *args, **kwargs):
        requires_backends(self, 'torch')
"""
        dummy_class = create_dummy_object("FakeClass", "'torch'")
        self.assertEqual(dummy_class, expected_dummy_class)

    def test_create_dummy_files(self):
        expected_dummy_pytorch_file = """# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends


CONSTANT = None


def function(*args, **kwargs):
    requires_backends(function, ["torch"])


class FakeClass(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
"""
        dummy_files = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]})
        self.assertEqual(dummy_files["torch"], expected_dummy_pytorch_file) | 34 | 0 |
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))

import check_dummies  # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init  # noqa: E402

# Align TRANSFORMERS_PATH in check_dummies with the current path
check_dummies.PATH_TO_TRANSFORMERS = os.path.join(git_repo_path, 'src', 'transformers')

DUMMY_CONSTANT = '\n{0} = None\n'
DUMMY_CLASS = '\nclass {0}(metaclass=DummyObject):\n    _backends = {1}\n\n    def __init__(self, *args, **kwargs):\n        requires_backends(self, {1})\n'
DUMMY_FUNCTION = '\ndef {0}(*args, **kwargs):\n    requires_backends({0}, {1})\n'
class CheckDummiesTester(unittest.TestCase):
    def test_find_backend(self):
        no_backend = find_backend(" _import_structure[\"models.albert\"].append(\"AlbertTokenizerFast\")")
        self.assertIsNone(no_backend)

        simple_backend = find_backend(" if not is_tokenizers_available():")
        self.assertEqual(simple_backend, "tokenizers")

        backend_with_underscore = find_backend(" if not is_tensorflow_text_available():")
        self.assertEqual(backend_with_underscore, "tensorflow_text")

        double_backend = find_backend(" if not (is_sentencepiece_available() and is_tokenizers_available()):")
        self.assertEqual(double_backend, "sentencepiece_and_tokenizers")

        double_backend_with_underscore = find_backend(
            " if not (is_sentencepiece_available() and is_tensorflow_text_available()):")
        self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")

        triple_backend = find_backend(
            " if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):")
        self.assertEqual(triple_backend, "sentencepiece_and_tokenizers_and_vision")

    def test_read_init(self):
        objects = read_init()
        # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
        self.assertIn("torch", objects)
        self.assertIn("tensorflow_text", objects)
        self.assertIn("sentencepiece_and_tokenizers", objects)

        # Likewise, we can't assert on the exact content of a key
        self.assertIn("BertModel", objects["torch"])
        self.assertIn("TFBertModel", objects["tf"])
        self.assertIn("FlaxBertModel", objects["flax"])
        self.assertIn("BertModel", objects["torch"])
        self.assertIn("TFBertTokenizer", objects["tensorflow_text"])
        self.assertIn("convert_slow_tokenizer", objects["sentencepiece_and_tokenizers"])

    def test_create_dummy_object(self):
        dummy_constant = create_dummy_object("CONSTANT", "\'torch\'")
        self.assertEqual(dummy_constant, "\nCONSTANT = None\n")

        dummy_function = create_dummy_object("function", "\'torch\'")
        self.assertEqual(
            dummy_function, "\ndef function(*args, **kwargs):\n    requires_backends(function, \'torch\')\n")

        expected_dummy_class = "\nclass FakeClass(metaclass=DummyObject):\n    _backends = \'torch\'\n\n    def __init__(self, *args, **kwargs):\n        requires_backends(self, \'torch\')\n"
        dummy_class = create_dummy_object("FakeClass", "\'torch\'")
        self.assertEqual(dummy_class, expected_dummy_class)

    def test_create_dummy_files(self):
        expected_dummy_pytorch_file = "# This file is autogenerated by the command `make fix-copies`, do not edit.\nfrom ..utils import DummyObject, requires_backends\n\n\nCONSTANT = None\n\n\ndef function(*args, **kwargs):\n    requires_backends(function, [\"torch\"])\n\n\nclass FakeClass(metaclass=DummyObject):\n    _backends = [\"torch\"]\n\n    def __init__(self, *args, **kwargs):\n        requires_backends(self, [\"torch\"])\n"
        dummy_files = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]})
        self.assertEqual(dummy_files["torch"], expected_dummy_pytorch_file) | 623 |
"""simple docstring"""
import argparse
import requests
import torch
from PIL import Image
from transformers import ViTMAEConfig, ViTMAEForPreTraining, ViTMAEImageProcessor
def rename_key(name):
    """Map an original ViTMAE checkpoint key to the Hugging Face parameter name."""
    if "cls_token" in name:
        name = name.replace('cls_token', 'vit.embeddings.cls_token')
    if "mask_token" in name:
        name = name.replace('mask_token', 'decoder.mask_token')
    if "decoder_pos_embed" in name:
        name = name.replace('decoder_pos_embed', 'decoder.decoder_pos_embed')
    if "pos_embed" in name and "decoder" not in name:
        name = name.replace('pos_embed', 'vit.embeddings.position_embeddings')
    if "patch_embed.proj" in name:
        name = name.replace('patch_embed.proj', 'vit.embeddings.patch_embeddings.projection')
    if "patch_embed.norm" in name:
        name = name.replace('patch_embed.norm', 'vit.embeddings.norm')
    if "decoder_blocks" in name:
        name = name.replace('decoder_blocks', 'decoder.decoder_layers')
    if "blocks" in name:
        name = name.replace('blocks', 'vit.encoder.layer')
    if "attn.proj" in name:
        name = name.replace('attn.proj', 'attention.output.dense')
    if "attn" in name:
        name = name.replace('attn', 'attention.self')
    if "norm1" in name:
        name = name.replace('norm1', 'layernorm_before')
    if "norm2" in name:
        name = name.replace('norm2', 'layernorm_after')
    if "mlp.fc1" in name:
        name = name.replace('mlp.fc1', 'intermediate.dense')
    if "mlp.fc2" in name:
        name = name.replace('mlp.fc2', 'output.dense')
    if "decoder_embed" in name:
        name = name.replace('decoder_embed', 'decoder.decoder_embed')
    if "decoder_norm" in name:
        name = name.replace('decoder_norm', 'decoder.decoder_norm')
    if "decoder_pred" in name:
        name = name.replace('decoder_pred', 'decoder.decoder_pred')
    if "norm.weight" in name and "decoder" not in name:
        name = name.replace('norm.weight', 'vit.layernorm.weight')
    if "norm.bias" in name and "decoder" not in name:
        name = name.replace('norm.bias', 'vit.layernorm.bias')

    return name
def convert_state_dict(orig_state_dict, config):
    """Split fused qkv weights and rename all remaining keys for the HF ViTMAE model."""
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            key_split = key.split('.')
            layer_num = int(key_split[1])
            if "decoder_blocks" in key:
                dim = config.decoder_hidden_size
                prefix = 'decoder.decoder_layers.'
                if "weight" in key:
                    orig_state_dict[f'{prefix}{layer_num}.attention.attention.query.weight'] = val[:dim, :]
                    orig_state_dict[f'{prefix}{layer_num}.attention.attention.key.weight'] = val[dim : dim * 2, :]
                    orig_state_dict[f'{prefix}{layer_num}.attention.attention.value.weight'] = val[-dim:, :]
                elif "bias" in key:
                    orig_state_dict[f'{prefix}{layer_num}.attention.attention.query.bias'] = val[:dim]
                    orig_state_dict[f'{prefix}{layer_num}.attention.attention.key.bias'] = val[dim : dim * 2]
                    orig_state_dict[f'{prefix}{layer_num}.attention.attention.value.bias'] = val[-dim:]
            else:
                dim = config.hidden_size
                prefix = 'vit.encoder.layer.'
                if "weight" in key:
                    orig_state_dict[f'{prefix}{layer_num}.attention.attention.query.weight'] = val[:dim, :]
                    orig_state_dict[f'{prefix}{layer_num}.attention.attention.key.weight'] = val[dim : dim * 2, :]
                    orig_state_dict[f'{prefix}{layer_num}.attention.attention.value.weight'] = val[-dim:, :]
                elif "bias" in key:
                    orig_state_dict[f'{prefix}{layer_num}.attention.attention.query.bias'] = val[:dim]
                    orig_state_dict[f'{prefix}{layer_num}.attention.attention.key.bias'] = val[dim : dim * 2]
                    orig_state_dict[f'{prefix}{layer_num}.attention.attention.value.bias'] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
def convert_vit_mae_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    config = ViTMAEConfig()
    if "large" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
    elif "huge" in checkpoint_url:
        config.patch_size = 14
        config.hidden_size = 1280
        config.intermediate_size = 5120
        config.num_hidden_layers = 32
        config.num_attention_heads = 16

    model = ViTMAEForPreTraining(config)

    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location='cpu')['model']

    image_processor = ViTMAEImageProcessor(size=config.image_size)
    new_state_dict = convert_state_dict(state_dict, config)

    model.load_state_dict(new_state_dict)
    model.eval()

    url = 'https://user-images.githubusercontent.com/11435359/147738734-196fd92f-9260-48d5-ba7e-bf103d29364d.jpg'

    image = Image.open(requests.get(url, stream=True).raw)
    image_processor = ViTMAEImageProcessor(size=config.image_size)
    inputs = image_processor(images=image, return_tensors='pt')

    # forward pass
    torch.manual_seed(2)
    outputs = model(**inputs)
    logits = outputs.logits

    if "large" in checkpoint_url:
        expected_slice = torch.tensor(
            [[-0.7309, -0.7128, -1.0169], [-1.0161, -0.9058, -1.1878], [-1.0478, -0.9411, -1.1911]])
    elif "huge" in checkpoint_url:
        expected_slice = torch.tensor(
            [[-1.1599, -0.9199, -1.2221], [-1.1952, -0.9269, -1.2307], [-1.2143, -0.9337, -1.2262]])
    else:
        expected_slice = torch.tensor(
            [[-0.9192, -0.8481, -1.1259], [-1.1349, -1.0034, -1.2599], [-1.1757, -1.0429, -1.2726]])

    # verify logits
    assert torch.allclose(logits[0, :3, :3], expected_slice, atol=1e-4)

    print(f'Saving model to {pytorch_dump_folder_path}')
    model.save_pretrained(pytorch_dump_folder_path)

    print(f'Saving image processor to {pytorch_dump_folder_path}')
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://dl.fbaipublicfiles.com/mae/visualize/mae_visualize_vit_base.pth',
type=str,
help='URL of the checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
    args = parser.parse_args()
convert_vit_mae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path) | 34 | 0 |
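# Example invocation (the script name and dump folder are illustrative placeholders;
# the checkpoint URL is the script's own default):
#   python convert_vit_mae_to_pytorch.py \
#       --checkpoint_url https://dl.fbaipublicfiles.com/mae/visualize/mae_visualize_vit_base.pth \
#       --pytorch_dump_folder_path ./vit-mae-base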
'''simple docstring'''
def hamming(n_element: int) -> list:
    """Return the first `n_element` Hamming numbers (numbers of the form 2^i * 3^j * 5^k)."""
    n_element = int(n_element)
    if n_element < 1:
        my_error = ValueError("a should be a positive number")
        raise my_error

    hamming_list = [1]
    i, j, k = (0, 0, 0)
    index = 1
    while index < n_element:
        while hamming_list[i] * 2 <= hamming_list[-1]:
            i += 1
        while hamming_list[j] * 3 <= hamming_list[-1]:
            j += 1
        while hamming_list[k] * 5 <= hamming_list[-1]:
            k += 1
        hamming_list.append(
            min(hamming_list[i] * 2, hamming_list[j] * 3, hamming_list[k] * 5))
        index += 1
    return hamming_list


if __name__ == "__main__":
    n = input('Enter the last number (nth term) of the Hamming Number Series: ')
    print('Formula of Hamming Number Series => 2^i * 3^j * 5^k')
    hamming_numbers = hamming(int(n))
    print('-----------------------------------------------------')
    print(F"The list with nth numbers is: {hamming_numbers}")
print('''-----------------------------------------------------''') | 667 |
"""simple docstring"""
import unittest
import torch
from torch import nn
from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory
def raise_fake_out_of_memory():
    """Helper that simulates a CUDA OOM error for the tests below."""
    raise RuntimeError('CUDA out of memory.')
class ModelForTest(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))


class MemoryTest(unittest.TestCase):
    def test_memory_implicit(self):
        batch_sizes = []

        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size):
            nonlocal batch_sizes
            batch_sizes.append(batch_size)
            if batch_size != 8:
                raise_fake_out_of_memory()

        mock_training_loop_function()
        self.assertListEqual(batch_sizes, [128, 64, 32, 16, 8])

    def test_memory_explicit(self):
        batch_sizes = []

        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size, arg1):
            nonlocal batch_sizes
            batch_sizes.append(batch_size)
            if batch_size != 8:
                raise_fake_out_of_memory()
            return batch_size, arg1

        bs, arg1 = mock_training_loop_function('hello')
        self.assertListEqual(batch_sizes, [128, 64, 32, 16, 8])
        self.assertListEqual([bs, arg1], [8, 'hello'])

    def test_start_zero(self):
        @find_executable_batch_size(starting_batch_size=0)
        def mock_training_loop_function(batch_size):
            pass

        with self.assertRaises(RuntimeError) as cm:
            mock_training_loop_function()
        self.assertIn('No executable batch size found, reached zero.', cm.exception.args[0])

    def test_approach_zero(self):
        @find_executable_batch_size(starting_batch_size=16)
        def mock_training_loop_function(batch_size):
            if batch_size > 0:
                raise_fake_out_of_memory()
            pass

        with self.assertRaises(RuntimeError) as cm:
            mock_training_loop_function()
        self.assertIn('No executable batch size found, reached zero.', cm.exception.args[0])

    def test_verbose_guard(self):
        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size, arg1, arg2):
            if batch_size != 8:
                raise raise_fake_out_of_memory()

        with self.assertRaises(TypeError) as cm:
            mock_training_loop_function(128, 'hello', 'world')
        self.assertIn('Batch size was passed into `f`', cm.exception.args[0])
        self.assertIn("`f(arg1='hello', arg2='world')", cm.exception.args[0])

    def test_any_other_error(self):
        @find_executable_batch_size(starting_batch_size=16)
        def mock_training_loop_function(batch_size):
            raise ValueError('Oops, we had an error!')

        with self.assertRaises(ValueError) as cm:
            mock_training_loop_function()
        self.assertIn('Oops, we had an error!', cm.exception.args[0])

    @require_cuda
    def test_release_memory(self):
        starting_memory = torch.cuda.memory_allocated()
        model = ModelForTest()
        model.cuda()
        self.assertGreater(torch.cuda.memory_allocated(), starting_memory)
        model = release_memory(model)
        self.assertEqual(torch.cuda.memory_allocated(), starting_memory) | 34 | 0 |
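# Illustrative pattern for using the decorator in real training code (the helper
# names `make_loader` and `step` are hypothetical):
#
#   from accelerate.utils.memory import find_executable_batch_size
#
#   @find_executable_batch_size(starting_batch_size=64)
#   def train(batch_size):
#       loader = make_loader(batch_size)  # hypothetical data-loader factory
#       for batch in loader:
#           step(batch)                   # hypothetical training step
#
#   train()  # batch_size is halved on each CUDA OOM until the loop succeeds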
'''simple docstring'''
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
lowerCamelCase = ["""text""", """image""", """audio"""]
def _A ( _lowerCAmelCase ):
"""simple docstring"""
__lowercase =[]
for input_type in input_types:
if input_type == "text":
inputs.append('Text input' )
elif input_type == "image":
inputs.append(
Image.open(Path(get_tests_dir('fixtures/tests_samples/COCO' ) ) / '000000039769.png' ).resize((512, 512) ) )
elif input_type == "audio":
inputs.append(torch.ones(3_000 ) )
elif isinstance(_lowercase , _lowercase ):
inputs.append(create_inputs(_lowercase ) )
else:
raise ValueError(f"""Invalid type requested: {input_type}""" )
return inputs
def _A ( _lowerCAmelCase ):
"""simple docstring"""
__lowercase =[]
for output in outputs:
if isinstance(_lowercase , (str, AgentText) ):
output_types.append('text' )
elif isinstance(_lowercase , (Image.Image, AgentImage) ):
output_types.append('image' )
elif isinstance(_lowercase , (torch.Tensor, AgentAudio) ):
output_types.append('audio' )
else:
raise ValueError(f"""Invalid output: {output}""" )
return output_types
@is_tool_test
class _UpperCamelCase :
'''simple docstring'''
def __lowerCamelCase ( self : Any):
'''simple docstring'''
self.assertTrue(hasattr(self.tool , 'inputs'))
self.assertTrue(hasattr(self.tool , 'outputs'))
__lowercase =self.tool.inputs
for _input in inputs:
if isinstance(_input , lowerCamelCase_):
for __input in _input:
self.assertTrue(__input in authorized_types)
else:
self.assertTrue(_input in authorized_types)
__lowercase =self.tool.outputs
for _output in outputs:
self.assertTrue(_output in authorized_types)
def __lowerCamelCase ( self : List[Any]):
'''simple docstring'''
__lowercase =create_inputs(self.tool.inputs)
__lowercase =self.tool(*lowerCamelCase_)
# There is a single output
if len(self.tool.outputs) == 1:
__lowercase =[outputs]
self.assertListEqual(output_types(lowerCamelCase_) , self.tool.outputs)
def __lowerCamelCase ( self : Union[str, Any]):
'''simple docstring'''
self.assertTrue(hasattr(self.tool , 'description'))
self.assertTrue(hasattr(self.tool , 'default_checkpoint'))
self.assertTrue(self.tool.description.startswith('This is a tool that'))
def __lowerCamelCase ( self : Tuple):
'''simple docstring'''
        inputs =create_inputs(self.tool.inputs)
        outputs =self.tool(*inputs)
        if not isinstance(outputs , list):
            outputs =[outputs]
        self.assertEqual(len(outputs) , len(self.tool.outputs))
        for output, output_type in zip(outputs , self.tool.outputs):
            agent_type =AGENT_TYPE_MAPPING[output_type]
            self.assertTrue(isinstance(output , agent_type))
def __lowerCamelCase ( self : int):
'''simple docstring'''
        inputs =create_inputs(self.tool.inputs)
        _inputs =[]
        for _input, input_type in zip(inputs , self.tool.inputs):
            if isinstance(input_type , list):
                _inputs.append([AGENT_TYPE_MAPPING[_input_type](_input) for _input_type in input_type])
            else:
                _inputs.append(AGENT_TYPE_MAPPING[input_type](_input))
        # Should not raise an error
        outputs =self.tool(*_inputs)
        if not isinstance(outputs , list):
            outputs =[outputs]
        self.assertEqual(len(outputs) , len(self.tool.outputs))
| 474 |
"""simple docstring"""
from typing import Dict
from transformers import EvalPrediction, HfArgumentParser, TrainingArguments, is_torch_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
get_torch_dist_unique_port,
require_torch_multi_gpu,
require_torch_neuroncore,
)
from transformers.training_args import ParallelMode
from transformers.utils import logging
logger = logging.get_logger(__name__)
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
from transformers import Trainer
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
def __init__( self , lowerCamelCase_ = 1_0_1) -> Tuple:
UpperCamelCase = length
def __len__( self) -> List[str]:
return self.length
def __getitem__( self , lowerCamelCase_) -> int:
return i
class snake_case_ :
"""simple docstring"""
def __call__( self , lowerCamelCase_) -> str:
return {"input_ids": torch.tensor(lowerCamelCase_), "labels": torch.tensor(lowerCamelCase_)}
class snake_case_ ( nn.Module ):
"""simple docstring"""
def __init__( self) -> List[Any]:
super().__init__()
# Add some (unused) params otherwise DDP will complain.
UpperCamelCase = nn.Linear(1_2_0 , 8_0)
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_=None) -> Any:
if labels is not None:
return torch.tensor(0.0 , device=input_ids.device), input_ids
else:
return input_ids
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
@require_torch_neuroncore
def UpperCAmelCase__ ( self) -> Tuple:
UpperCamelCase = F'--nproc_per_node=2\n --master_port={get_torch_dist_unique_port()}\n {self.test_file_dir}/test_trainer_distributed.py\n '.split()
UpperCamelCase = self.get_auto_remove_tmp_dir()
UpperCamelCase = F'--output_dir {output_dir}'.split()
UpperCamelCase = ['''torchrun'''] + distributed_args + args
execute_subprocess_async(lowerCamelCase_ , env=self.get_env())
# successful return here == success - any errors would have caused an error in the sub-call
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
@require_torch_multi_gpu
def UpperCAmelCase__ ( self) -> Union[str, Any]:
UpperCamelCase = F'--nproc_per_node={torch.cuda.device_count()}\n --master_port={get_torch_dist_unique_port()}\n {self.test_file_dir}/test_trainer_distributed.py\n '.split()
UpperCamelCase = self.get_auto_remove_tmp_dir()
UpperCamelCase = F'--output_dir {output_dir}'.split()
UpperCamelCase = ['''torchrun'''] + distributed_args + args
execute_subprocess_async(lowerCamelCase_ , env=self.get_env())
# successful return here == success - any errors would have caused an error in the sub-call
if __name__ == "__main__":
# The script below is meant to be run under torch.distributed, on a machine with multiple GPUs:
#
# PYTHONPATH="src" python -m torch.distributed.run --nproc_per_node 2 --output_dir output_dir ./tests/test_trainer_distributed.py
    parser = HfArgumentParser((TrainingArguments,))
    training_args = parser.parse_args_into_dataclasses()[0]
logger.warning(
f'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, '
f'distributed training: {training_args.parallel_mode != ParallelMode.NOT_DISTRIBUTED}'
)
# Essentially, what we want to verify in the distributed case is that we get all samples back,
# in the right order. (this is crucial for prediction for instance)
for dataset_length in [101, 40, 7]:
        dataset = DummyDataset(dataset_length)
        def compute_metrics( p: EvalPrediction ) -> Dict:
            """simple docstring"""
            sequential = list(range(len(dataset ) ) )
            success = p.predictions.tolist() == sequential and p.label_ids.tolist() == sequential
            if not success and training_args.local_rank == 0:
                logger.warning(
                    '''Predictions and/or labels do not match expected results:\n  - predictions: '''
                    f'{p.predictions.tolist()}\n  - labels: {p.label_ids.tolist()}\n  - expected: {sequential}' )
            return {"success": success}
        trainer = Trainer(
model=DummyModel(),
args=training_args,
data_collator=DummyDataCollator(),
eval_dataset=dataset,
compute_metrics=compute_metrics,
)
        metrics = trainer.evaluate()
logger.info(metrics)
if metrics["eval_success"] is not True:
logger.error(metrics)
exit(1)
        p = trainer.predict(dataset)
logger.info(p.metrics)
if p.metrics["test_success"] is not True:
logger.error(p.metrics)
exit(1)
        trainer.args.eval_accumulation_steps = 2
        metrics = trainer.evaluate()
logger.info(metrics)
if metrics["eval_success"] is not True:
logger.error(metrics)
exit(1)
        p = trainer.predict(dataset)
logger.info(p.metrics)
if p.metrics["test_success"] is not True:
logger.error(p.metrics)
exit(1)
SCREAMING_SNAKE_CASE_ = None | 34 | 0 |
from typing import List, Optional, Tuple, Union
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from ... import AutoBackbone
from ...modeling_outputs import SemanticSegmenterOutput
from ...modeling_utils import PreTrainedModel
from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings
from ...utils.backbone_utils import BackboneMixin
from .configuration_upernet import UperNetConfig
SCREAMING_SNAKE_CASE_:Dict = [
"""openmmlab/upernet-convnext-tiny""",
# See all UperNet models at https://huggingface.co/models?filter=upernet
]
# General docstring
SCREAMING_SNAKE_CASE_:Dict = """UperNetConfig"""
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
'''simple docstring'''
def __init__( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ = 0, lowerCamelCase__ = False, lowerCamelCase__ = 1, ):
super().__init__()
A : Optional[Any] = nn.Convad(
in_channels=lowerCamelCase_, out_channels=lowerCamelCase_, kernel_size=lowerCamelCase_, padding=lowerCamelCase_, bias=lowerCamelCase_, dilation=lowerCamelCase_, )
A : List[str] = nn.BatchNormad(lowerCamelCase_ )
A : Optional[int] = nn.ReLU()
def _lowerCAmelCase ( self, lowerCamelCase__ ):
A : Tuple = self.conv(lowerCamelCase_ )
A : str = self.batch_norm(lowerCamelCase_ )
A : Tuple = self.activation(lowerCamelCase_ )
return output
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
'''simple docstring'''
def __init__( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ ):
super().__init__()
A : List[str] = [
nn.AdaptiveAvgPoolad(lowerCamelCase_ ),
UperNetConvModule(lowerCamelCase_, lowerCamelCase_, kernel_size=1 ),
]
for i, layer in enumerate(self.layers ):
self.add_module(str(lowerCamelCase_ ), lowerCamelCase_ )
def _lowerCAmelCase ( self, lowerCamelCase__ ):
A : List[Any] = input
for layer in self.layers:
A : Tuple = layer(lowerCamelCase_ )
return hidden_state
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
'''simple docstring'''
def __init__( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ ):
super().__init__()
A : Dict = pool_scales
A : Dict = align_corners
A : List[str] = in_channels
A : Dict = channels
A : int = []
for i, pool_scale in enumerate(lowerCamelCase_ ):
A : Union[str, Any] = UperNetPyramidPoolingBlock(pool_scale=lowerCamelCase_, in_channels=lowerCamelCase_, channels=lowerCamelCase_ )
self.blocks.append(lowerCamelCase_ )
self.add_module(str(lowerCamelCase_ ), lowerCamelCase_ )
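    # Shape sketch (illustrative): for input (B, C, H, W) and pool_scales such as
    # (1, 2, 3, 6), each block adaptively pools to (B, C, k, k) and projects to
    # `channels` with a 1x1 conv; forward() then upsamples every scale back to
    # (B, channels, H, W) so the caller can concatenate along the channel dim.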
def _lowerCAmelCase ( self, lowerCamelCase__ ):
A : Any = []
for ppm in self.blocks:
A : List[str] = ppm(lowerCamelCase_ )
A : Dict = nn.functional.interpolate(
lowerCamelCase_, size=x.size()[2:], mode="""bilinear""", align_corners=self.align_corners )
ppm_outs.append(lowerCamelCase_ )
return ppm_outs
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
'''simple docstring'''
def __init__( self, lowerCamelCase__, lowerCamelCase__ ):
super().__init__()
A : Optional[Any] = config
A : int = config.pool_scales # e.g. (1, 2, 3, 6)
A : Optional[int] = in_channels
A : Any = config.hidden_size
A : int = False
A : List[str] = nn.Convad(self.channels, config.num_labels, kernel_size=1 )
# PSP Module
A : str = UperNetPyramidPoolingModule(
self.pool_scales, self.in_channels[-1], self.channels, align_corners=self.align_corners, )
A : List[Any] = UperNetConvModule(
self.in_channels[-1] + len(self.pool_scales ) * self.channels, self.channels, kernel_size=3, padding=1, )
# FPN Module
A : List[str] = nn.ModuleList()
A : str = nn.ModuleList()
for in_channels in self.in_channels[:-1]: # skip the top layer
A : Optional[int] = UperNetConvModule(lowerCamelCase_, self.channels, kernel_size=1 )
A : Any = UperNetConvModule(self.channels, self.channels, kernel_size=3, padding=1 )
self.lateral_convs.append(lowerCamelCase_ )
self.fpn_convs.append(lowerCamelCase_ )
A : Dict = UperNetConvModule(
len(self.in_channels ) * self.channels, self.channels, kernel_size=3, padding=1, )
def _lowerCAmelCase ( self ):
self.apply(self._init_weights )
def _lowerCAmelCase ( self, lowerCamelCase__ ):
if isinstance(lowerCamelCase_, nn.Convad ):
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
def _lowerCAmelCase ( self, lowerCamelCase__ ):
A : Optional[int] = inputs[-1]
A : List[Any] = [x]
psp_outs.extend(self.psp_modules(lowerCamelCase_ ) )
A : str = torch.cat(lowerCamelCase_, dim=1 )
A : Tuple = self.bottleneck(lowerCamelCase_ )
return output
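    # Forward sketch: `encoder_hidden_states` is the tuple of backbone feature maps.
    # The deepest map goes through the pyramid pooling module (psp_forward), the
    # others through 1x1 lateral convs; features are then fused top-down, refined
    # by the 3x3 fpn_convs, concatenated, reduced by fpn_bottleneck, and classified
    # per pixel by the 1x1 classifier.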
def _lowerCAmelCase ( self, lowerCamelCase__ ):
# build laterals
A : Optional[Any] = [lateral_conv(encoder_hidden_states[i] ) for i, lateral_conv in enumerate(self.lateral_convs )]
laterals.append(self.psp_forward(lowerCamelCase_ ) )
# build top-down path
A : List[str] = len(lowerCamelCase_ )
for i in range(used_backbone_levels - 1, 0, -1 ):
A : int = laterals[i - 1].shape[2:]
A : int = laterals[i - 1] + nn.functional.interpolate(
laterals[i], size=lowerCamelCase_, mode="""bilinear""", align_corners=self.align_corners )
# build outputs
A : Dict = [self.fpn_convs[i](laterals[i] ) for i in range(used_backbone_levels - 1 )]
# append psp feature
fpn_outs.append(laterals[-1] )
for i in range(used_backbone_levels - 1, 0, -1 ):
A : Optional[Any] = nn.functional.interpolate(
fpn_outs[i], size=fpn_outs[0].shape[2:], mode="""bilinear""", align_corners=self.align_corners )
A : str = torch.cat(lowerCamelCase_, dim=1 )
A : List[str] = self.fpn_bottleneck(lowerCamelCase_ )
A : Optional[int] = self.classifier(lowerCamelCase_ )
return output
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
'''simple docstring'''
def __init__( self, lowerCamelCase__, lowerCamelCase__ = 2, lowerCamelCase__ = 3, lowerCamelCase__ = 1 ):
super().__init__()
A : Union[str, Any] = config
A : Optional[int] = config.auxiliary_in_channels
A : Any = config.auxiliary_channels
A : int = config.auxiliary_num_convs
A : List[str] = config.auxiliary_concat_input
A : int = in_index
A : List[str] = (kernel_size // 2) * dilation
A : Optional[int] = []
convs.append(
UperNetConvModule(
self.in_channels, self.channels, kernel_size=lowerCamelCase_, padding=lowerCamelCase_, dilation=lowerCamelCase_ ) )
for i in range(self.num_convs - 1 ):
convs.append(
UperNetConvModule(
self.channels, self.channels, kernel_size=lowerCamelCase_, padding=lowerCamelCase_, dilation=lowerCamelCase_ ) )
if self.num_convs == 0:
A : Dict = nn.Identity()
else:
A : Tuple = nn.Sequential(*lowerCamelCase_ )
if self.concat_input:
A : List[Any] = UperNetConvModule(
self.in_channels + self.channels, self.channels, kernel_size=lowerCamelCase_, padding=kernel_size // 2 )
A : Any = nn.Convad(self.channels, config.num_labels, kernel_size=1 )
def _lowerCAmelCase ( self ):
self.apply(self._init_weights )
def _lowerCAmelCase ( self, lowerCamelCase__ ):
if isinstance(lowerCamelCase_, nn.Convad ):
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
def _lowerCAmelCase ( self, lowerCamelCase__ ):
# just take the relevant feature maps
A : Optional[int] = encoder_hidden_states[self.in_index]
A : Union[str, Any] = self.convs(lowerCamelCase_ )
if self.concat_input:
A : Any = self.conv_cat(torch.cat([hidden_states, output], dim=1 ) )
A : Optional[int] = self.classifier(lowerCamelCase_ )
return output
class SCREAMING_SNAKE_CASE__ ( lowerCamelCase_ ):
'''simple docstring'''
__lowerCamelCase : Union[str, Any] = UperNetConfig
__lowerCamelCase : List[Any] = "pixel_values"
__lowerCamelCase : Optional[Any] = True
def _lowerCAmelCase ( self, lowerCamelCase__ ):
if isinstance(lowerCamelCase_, lowerCamelCase_ ):
module.backbone.init_weights()
module.decode_head.init_weights()
module.auxiliary_head.init_weights()
def _lowerCAmelCase ( self ):
self.backbone.init_weights()
self.decode_head.init_weights()
self.auxiliary_head.init_weights()
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__=False ):
if isinstance(lowerCamelCase_, lowerCamelCase_ ):
A : Optional[int] = value
SCREAMING_SNAKE_CASE_:List[str] = R"""\n Parameters:\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n config ([`UperNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n"""
SCREAMING_SNAKE_CASE_:Tuple = R"""\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using\n [`AutoImageProcessor`]. See [`SegformerImageProcessor.__call__`] for details.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers in case the backbone has them. See\n `attentions` under returned tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers of the backbone. See `hidden_states` under\n returned tensors for more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n"""
@add_start_docstrings(
"UperNet framework leveraging any vision backbone e.g. for ADE20k, CityScapes." , lowerCamelCase_ , )
class SCREAMING_SNAKE_CASE__ ( lowerCamelCase_ ):
'''simple docstring'''
def __init__( self, lowerCamelCase__ ):
super().__init__(lowerCamelCase_ )
A : List[Any] = AutoBackbone.from_config(config.backbone_config )
# Semantic segmentation head(s)
A : Union[str, Any] = UperNetHead(lowerCamelCase_, in_channels=self.backbone.channels )
A : Union[str, Any] = UperNetFCNHead(lowerCamelCase_ ) if config.use_auxiliary_head else None
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(UPERNET_INPUTS_DOCSTRING.format("""batch_size, sequence_length""" ) )
@replace_return_docstrings(output_type=lowerCamelCase_, config_class=_CONFIG_FOR_DOC )
def _lowerCAmelCase ( self, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, ):
A : Tuple = return_dict if return_dict is not None else self.config.use_return_dict
A : Optional[Any] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
A : Optional[Any] = output_attentions if output_attentions is not None else self.config.output_attentions
A : Any = self.backbone.forward_with_filtered_kwargs(
lowerCamelCase_, output_hidden_states=lowerCamelCase_, output_attentions=lowerCamelCase_ )
A : Optional[int] = outputs.feature_maps
A : Any = self.decode_head(lowerCamelCase_ )
A : Optional[Any] = nn.functional.interpolate(lowerCamelCase_, size=pixel_values.shape[2:], mode="""bilinear""", align_corners=lowerCamelCase_ )
A : List[Any] = None
if self.auxiliary_head is not None:
A : int = self.auxiliary_head(lowerCamelCase_ )
A : Tuple = nn.functional.interpolate(
lowerCamelCase_, size=pixel_values.shape[2:], mode="""bilinear""", align_corners=lowerCamelCase_ )
A : Optional[int] = None
if labels is not None:
if self.config.num_labels == 1:
raise ValueError("""The number of labels should be greater than one""" )
else:
# compute weighted loss
A : Any = CrossEntropyLoss(ignore_index=self.config.loss_ignore_index )
A : Tuple = loss_fct(lowerCamelCase_, lowerCamelCase_ )
A : Union[str, Any] = loss_fct(lowerCamelCase_, lowerCamelCase_ )
A : List[str] = main_loss + self.config.auxiliary_loss_weight * auxiliary_loss
if not return_dict:
if output_hidden_states:
A : Any = (logits,) + outputs[1:]
else:
A : Any = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return SemanticSegmenterOutput(
loss=lowerCamelCase_, logits=lowerCamelCase_, hidden_states=outputs.hidden_states, attentions=outputs.attentions, )
| 662 |
"""simple docstring"""
import argparse
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration
INIT_COMMON = [
# tf -> hf
('/', '.'),
('layer_', 'layers.'),
('kernel', 'weight'),
('beta', 'bias'),
('gamma', 'weight'),
('pegasus', 'model'),
]
END_COMMON = [
('.output.dense', '.fc2'),
('intermediate.LayerNorm', 'final_layer_norm'),
('intermediate.dense', 'fc1'),
]
DECODER_PATTERNS = (
INIT_COMMON
+ [
('attention.self.LayerNorm', 'self_attn_layer_norm'),
('attention.output.dense', 'self_attn.out_proj'),
('attention.self', 'self_attn'),
('attention.encdec.LayerNorm', 'encoder_attn_layer_norm'),
('attention.encdec_output.dense', 'encoder_attn.out_proj'),
('attention.encdec', 'encoder_attn'),
('key', 'k_proj'),
('value', 'v_proj'),
('query', 'q_proj'),
('decoder.LayerNorm', 'decoder.layernorm_embedding'),
]
+ END_COMMON
)
REMAINING_PATTERNS = (
INIT_COMMON
+ [
('embeddings.word_embeddings', 'shared.weight'),
('embeddings.position_embeddings', 'embed_positions.weight'),
('attention.self.LayerNorm', 'self_attn_layer_norm'),
('attention.output.dense', 'self_attn.output'),
('attention.self', 'self_attn.self'),
('encoder.LayerNorm', 'encoder.layernorm_embedding'),
]
+ END_COMMON
)
KEYS_TO_IGNORE = [
'encdec/key/bias',
'encdec/query/bias',
'encdec/value/bias',
'self/key/bias',
'self/query/bias',
'self/value/bias',
'encdec_output/dense/bias',
'attention/output/dense/bias',
]
def rename_state_dict_key( k ,patterns ):
    """simple docstring"""
    for tf_name, hf_name in patterns:
        k = k.replace(tf_name ,hf_name )
    return k
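# Example trace (hypothetical TF key, for illustration only): with DECODER_PATTERNS,
# "pegasus/decoder/layer_0/attention/self/query/kernel" first becomes
# "pegasus.decoder.layer_0.attention.self.query.kernel" ('/' -> '.'), then the
# layer_/kernel/pegasus rules yield "model.decoder.layers.0.attention.self.query.weight",
# and the attention.self/query rules give "model.decoder.layers.0.self_attn.q_proj.weight".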
def convert_bigbird_pegasus( tf_weights ,config_update ):
    """simple docstring"""
    cfg = BigBirdPegasusConfig(**config_update )
    torch_model = BigBirdPegasusForConditionalGeneration(cfg )
    state_dict = torch_model.state_dict()
    mapping = {}
    # separating decoder weights
    decoder_weights = {k: tf_weights[k] for k in tf_weights if k.startswith('''pegasus/decoder''' )}
    remaining_weights = {k: tf_weights[k] for k in tf_weights if not k.startswith('''pegasus/decoder''' )}
    for k, v in tqdm(decoder_weights.items() ,'''tf -> hf conversion''' ):
        conditions = [k.endswith(ending ) for ending in KEYS_TO_IGNORE]
        if any(conditions ):
            continue
        patterns = DECODER_PATTERNS
        new_k = rename_state_dict_key(k ,patterns )
        if new_k not in state_dict:
            raise ValueError(f'could not find new key {new_k} in state dict. (converted from {k})' )
        if any(True if i in k else False for i in ['''dense''', '''query''', '''key''', '''value'''] ):
            v = v.T
        mapping[new_k] = torch.from_numpy(v )
        assert v.shape == state_dict[new_k].shape, f'{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}'
    for k, v in tqdm(remaining_weights.items() ,'''tf -> hf conversion''' ):
        conditions = [k.endswith(ending ) for ending in KEYS_TO_IGNORE]
        if any(conditions ):
            continue
        patterns = REMAINING_PATTERNS
        new_k = rename_state_dict_key(k ,patterns )
        if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings":
            raise ValueError(f'could not find new key {new_k} in state dict. (converted from {k})' )
        if any(True if i in k else False for i in ['''dense''', '''query''', '''key''', '''value'''] ):
            v = v.T
        mapping[new_k] = torch.from_numpy(v )
        if k != "pegasus/embeddings/position_embeddings":
            assert v.shape == state_dict[new_k].shape, f'{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}'
    mapping['''model.encoder.embed_positions.weight'''] = mapping['''model.embed_positions.weight''']
    mapping['''model.decoder.embed_positions.weight'''] = mapping.pop('''model.embed_positions.weight''' )
    missing , extra = torch_model.load_state_dict(mapping ,strict=False )
    unexpected_missing = [
        k
        for k in missing
        if k
        not in [
            '''final_logits_bias''',
            '''model.encoder.embed_tokens.weight''',
            '''model.decoder.embed_tokens.weight''',
            '''lm_head.weight''',
        ]
    ]
    assert unexpected_missing == [], f'no matches found for the following torch keys {unexpected_missing}'
    assert extra == [], f'no matches found for the following tf keys {extra}'
    return torch_model
def get_tf_weights_as_numpy( path ):
    """simple docstring"""
    init_vars = tf.train.list_variables(path )
    tf_weights = {}
    ignore_name = ['''global_step''']
    for name, shape in tqdm(init_vars ,desc='''converting tf checkpoint to dict''' ):
        skip_key = any(pat in name for pat in ignore_name )
        if skip_key:
            continue
        array = tf.train.load_variable(path ,name )
        tf_weights[name] = array
    return tf_weights
def convert_bigbird_pegasus_ckpt_to_pytorch( ckpt_path ,save_dir ,config_update ):
    """simple docstring"""
    tf_weights = get_tf_weights_as_numpy(ckpt_path )
    torch_model = convert_bigbird_pegasus(tf_weights ,config_update )
    torch_model.save_pretrained(save_dir )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--tf_ckpt_path', type=str, help='passed to tf.train.list_variables')
parser.add_argument('--save_dir', default=None, type=str, help='Path to the output PyTorch model.')
    args = parser.parse_args()
    config_update = {}
convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update) | 34 | 0 |
import os
import pickle
import unittest
from transformers import AutoTokenizer
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.models.bert_japanese.tokenization_bert_japanese import (
VOCAB_FILES_NAMES,
BertJapaneseTokenizer,
CharacterTokenizer,
JumanppTokenizer,
MecabTokenizer,
SudachiTokenizer,
WordpieceTokenizer,
)
from transformers.testing_utils import custom_tokenizers, require_jumanpp, require_sudachi
from ...test_tokenization_common import TokenizerTesterMixin
@custom_tokenizers
class _a ( lowerCamelCase_ , unittest.TestCase ):
"""simple docstring"""
A_ = BertJapaneseTokenizer
A_ = False
A_ = True
def _UpperCAmelCase ( self ) -> Optional[Any]:
super().setUp()
UpperCamelCase_ = [
'[UNK]',
'[CLS]',
'[SEP]',
'こんにちは',
'こん',
'にちは',
'ばんは',
'##こん',
'##にちは',
'##ばんは',
'世界',
'##世界',
'、',
'##、',
'。',
'##。',
]
UpperCamelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
def _UpperCAmelCase ( self , _UpperCAmelCase ) -> List[Any]:
UpperCamelCase_ = 'こんにちは、世界。 \nこんばんは、世界。'
UpperCamelCase_ = 'こんにちは 、 世界 。 こんばんは 、 世界 。'
return input_text, output_text
def _UpperCAmelCase ( self , _UpperCAmelCase ) -> Tuple:
UpperCamelCase_ , UpperCamelCase_ = self.get_input_output_texts(lowerCamelCase_ )
UpperCamelCase_ = tokenizer.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ )
UpperCamelCase_ = tokenizer.decode(lowerCamelCase_ , clean_up_tokenization_spaces=lowerCamelCase_ )
return text, ids
def _UpperCAmelCase ( self ) -> str:
pass # TODO add if relevant
def _UpperCAmelCase ( self ) -> List[str]:
pass # TODO add if relevant
def _UpperCAmelCase ( self ) -> Optional[int]:
pass # TODO add if relevant
def _UpperCAmelCase ( self ) -> str:
UpperCamelCase_ = self.tokenizer_class(self.vocab_file )
UpperCamelCase_ = tokenizer.tokenize('こんにちは、世界。\nこんばんは、世界。' )
self.assertListEqual(lowerCamelCase_ , ['こんにちは', '、', '世界', '。', 'こん', '##ばんは', '、', '世界', '。'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCamelCase_ ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
def _UpperCAmelCase ( self ) -> Optional[int]:
UpperCamelCase_ = self.tokenizer_class(self.vocab_file , word_tokenizer_type='mecab' )
self.assertIsNotNone(lowerCamelCase_ )
UpperCamelCase_ = 'こんにちは、世界。\nこんばんは、世界。'
UpperCamelCase_ = tokenizer.tokenize(lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_ , ['こんにちは', '、', '世界', '。', 'こん', '##ばんは', '、', '世界', '。'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCamelCase_ ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
UpperCamelCase_ = os.path.join(self.tmpdirname , 'tokenizer.bin' )
with open(lowerCamelCase_ , 'wb' ) as handle:
pickle.dump(lowerCamelCase_ , lowerCamelCase_ )
with open(lowerCamelCase_ , 'rb' ) as handle:
UpperCamelCase_ = pickle.load(lowerCamelCase_ )
UpperCamelCase_ = tokenizer_new.tokenize(lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
def _UpperCAmelCase ( self ) -> Tuple:
UpperCamelCase_ = MecabTokenizer(mecab_dic='ipadic' )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップルストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れ', 'た', '。'] , )
def _UpperCAmelCase ( self ) -> int:
try:
UpperCamelCase_ = MecabTokenizer(mecab_dic='unidic_lite' )
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップル', 'ストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れ', 'た', '。'] , )
def _UpperCAmelCase ( self ) -> Optional[Any]:
try:
UpperCamelCase_ = MecabTokenizer(mecab_dic='unidic' )
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップル', 'ストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れ', 'た', '。'] , )
def _UpperCAmelCase ( self ) -> Dict:
UpperCamelCase_ = MecabTokenizer(do_lower_case=lowerCamelCase_ , mecab_dic='ipadic' )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップルストア', 'で', 'iphone', '8', 'が', '発売', 'さ', 'れ', 'た', '。'] , )
def _UpperCAmelCase ( self ) -> int:
try:
UpperCamelCase_ = MecabTokenizer(
do_lower_case=lowerCamelCase_ , normalize_text=lowerCamelCase_ , mecab_option='-d /usr/local/lib/mecab/dic/jumandic' )
except RuntimeError:
# if dict doesn't exist in the system, previous code raises this error.
return
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップルストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れた', '\u3000', '。'] , )
def _UpperCAmelCase ( self ) -> Optional[int]:
UpperCamelCase_ = MecabTokenizer(normalize_text=lowerCamelCase_ , mecab_dic='ipadic' )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップルストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れ', 'た', ' ', '。'] , )
@require_sudachi
def _UpperCAmelCase ( self ) -> Tuple:
UpperCamelCase_ = self.tokenizer_class(self.vocab_file , word_tokenizer_type='sudachi' )
self.assertIsNotNone(lowerCamelCase_ )
UpperCamelCase_ = 'こんにちは、世界。\nこんばんは、世界。'
UpperCamelCase_ = tokenizer.tokenize(lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_ , ['こんにちは', '、', '世界', '。', 'こん', '##ばんは', '、', '世界', '。'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCamelCase_ ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
UpperCamelCase_ = os.path.join(self.tmpdirname , 'tokenizer.bin' )
with open(lowerCamelCase_ , 'wb' ) as handle:
pickle.dump(lowerCamelCase_ , lowerCamelCase_ )
with open(lowerCamelCase_ , 'rb' ) as handle:
UpperCamelCase_ = pickle.load(lowerCamelCase_ )
UpperCamelCase_ = tokenizer_new.tokenize(lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
@require_sudachi
def _UpperCAmelCase ( self ) -> Any:
UpperCamelCase_ = SudachiTokenizer(sudachi_dict_type='core' )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , [' ', '\t', 'アップル', 'ストア', 'で', 'iPhone', '8', ' ', 'が', ' ', ' ', '\n ', '発売', 'さ', 'れ', 'た', ' ', '。', ' ', ' '] , )
@require_sudachi
def _UpperCAmelCase ( self ) -> int:
UpperCamelCase_ = SudachiTokenizer(sudachi_dict_type='core' , sudachi_split_mode='A' )
self.assertListEqual(tokenizer.tokenize('外国人参政権' ) , ['外国', '人', '参政', '権'] )
@require_sudachi
def _UpperCAmelCase ( self ) -> Any:
UpperCamelCase_ = SudachiTokenizer(sudachi_dict_type='core' , sudachi_split_mode='B' )
self.assertListEqual(tokenizer.tokenize('外国人参政権' ) , ['外国人', '参政権'] )
@require_sudachi
def _UpperCAmelCase ( self ) -> Optional[Any]:
UpperCamelCase_ = SudachiTokenizer(sudachi_dict_type='core' , sudachi_split_mode='C' )
self.assertListEqual(tokenizer.tokenize('外国人参政権' ) , ['外国人参政権'] )
@require_sudachi
def _UpperCAmelCase ( self ) -> Any:
UpperCamelCase_ = SudachiTokenizer(do_lower_case=lowerCamelCase_ , sudachi_dict_type='core' )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , [' ', '\t', 'アップル', 'ストア', 'で', 'iphone', '8', ' ', 'が', ' ', ' ', '\n ', '発売', 'さ', 'れ', 'た', ' ', '。', ' ', ' '] , )
@require_sudachi
def _UpperCAmelCase ( self ) -> Optional[int]:
UpperCamelCase_ = SudachiTokenizer(normalize_text=lowerCamelCase_ , sudachi_dict_type='core' )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , [' ', '\t', 'アップル', 'ストア', 'で', 'iPhone', '8', ' ', 'が', ' ', ' ', '\n ', '発売', 'さ', 'れ', 'た', '\u3000', '。', ' ', ' '] , )
@require_sudachi
def _UpperCAmelCase ( self ) -> str:
UpperCamelCase_ = SudachiTokenizer(trim_whitespace=lowerCamelCase_ , sudachi_dict_type='core' )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップル', 'ストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れ', 'た', '。'] , )
@require_jumanpp
def _UpperCAmelCase ( self ) -> str:
UpperCamelCase_ = self.tokenizer_class(self.vocab_file , word_tokenizer_type='jumanpp' )
self.assertIsNotNone(lowerCamelCase_ )
UpperCamelCase_ = 'こんにちは、世界。\nこんばんは、世界。'
UpperCamelCase_ = tokenizer.tokenize(lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_ , ['こんにちは', '、', '世界', '。', 'こん', '##ばんは', '、', '世界', '。'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCamelCase_ ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
UpperCamelCase_ = os.path.join(self.tmpdirname , 'tokenizer.bin' )
with open(lowerCamelCase_ , 'wb' ) as handle:
pickle.dump(lowerCamelCase_ , lowerCamelCase_ )
with open(lowerCamelCase_ , 'rb' ) as handle:
UpperCamelCase_ = pickle.load(lowerCamelCase_ )
UpperCamelCase_ = tokenizer_new.tokenize(lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
@require_jumanpp
def _UpperCAmelCase ( self ) -> str:
UpperCamelCase_ = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップル', 'ストア', 'で', 'iPhone', '8', '\u3000', 'が', '\u3000', '\u3000', '\u3000', '発売', 'さ', 'れた', '\u3000', '。'] , )
@require_jumanpp
def _UpperCAmelCase ( self ) -> Optional[int]:
UpperCamelCase_ = JumanppTokenizer(do_lower_case=lowerCamelCase_ )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップル', 'ストア', 'で', 'iphone', '8', '\u3000', 'が', '\u3000', '\u3000', '\u3000', '発売', 'さ', 'れた', '\u3000', '。'] , )
@require_jumanpp
def _UpperCAmelCase ( self ) -> int:
UpperCamelCase_ = JumanppTokenizer(normalize_text=lowerCamelCase_ )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['ア', 'ッ', 'フ', '゚', 'ル', 'ストア', 'で', 'iPhone', '8', '\u3000', 'が', '\u3000', '\u3000', '\u3000', '発売', 'さ', 'れた', '\u3000', '。'] , )
@require_jumanpp
def _UpperCAmelCase ( self ) -> Optional[Any]:
UpperCamelCase_ = JumanppTokenizer(trim_whitespace=lowerCamelCase_ )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップル', 'ストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れた', '。'] , )
@require_jumanpp
def _UpperCAmelCase ( self ) -> List[Any]:
UpperCamelCase_ = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize('ありがとうございますm(_ _)m見つけるのが大変です。' ) , ['ありがとう', 'ございます', 'm(_ _)m', '見つける', 'の', 'が', '大変です', '。'] , )
def _UpperCAmelCase ( self ) -> List[Any]:
UpperCamelCase_ = ['[UNK]', '[CLS]', '[SEP]', 'こんにちは', 'こん', 'にちは', 'ばんは', '##こん', '##にちは', '##ばんは']
UpperCamelCase_ = {}
for i, token in enumerate(lowerCamelCase_ ):
UpperCamelCase_ = i
UpperCamelCase_ = WordpieceTokenizer(vocab=lowerCamelCase_ , unk_token='[UNK]' )
self.assertListEqual(tokenizer.tokenize('' ) , [] )
self.assertListEqual(tokenizer.tokenize('こんにちは' ) , ['こんにちは'] )
self.assertListEqual(tokenizer.tokenize('こんばんは' ) , ['こん', '##ばんは'] )
self.assertListEqual(tokenizer.tokenize('こんばんは こんばんにちは こんにちは' ) , ['こん', '##ばんは', '[UNK]', 'こんにちは'] )
def _UpperCAmelCase ( self ) -> str:
UpperCamelCase_ = BertJapaneseTokenizer.from_pretrained('nlp-waseda/roberta-base-japanese-with-auto-jumanpp' )
UpperCamelCase_ = tokenizer.subword_tokenizer
UpperCamelCase_ = subword_tokenizer.tokenize('国境 の 長い トンネル を 抜ける と 雪国 であった 。' )
self.assertListEqual(lowerCamelCase_ , ['▁国境', '▁の', '▁長い', '▁トンネル', '▁を', '▁抜ける', '▁と', '▁雪', '国', '▁であった', '▁。'] )
UpperCamelCase_ = subword_tokenizer.tokenize('こんばんは こんばん にち は こんにちは' )
self.assertListEqual(lowerCamelCase_ , ['▁こん', 'ばん', 'は', '▁こん', 'ばん', '▁に', 'ち', '▁は', '▁こんにちは'] )
def _UpperCAmelCase ( self ) -> List[Any]:
UpperCamelCase_ = self.tokenizer_class.from_pretrained('cl-tohoku/bert-base-japanese' )
UpperCamelCase_ = tokenizer.encode('ありがとう。' , add_special_tokens=lowerCamelCase_ )
UpperCamelCase_ = tokenizer.encode('どういたしまして。' , add_special_tokens=lowerCamelCase_ )
UpperCamelCase_ = tokenizer.build_inputs_with_special_tokens(lowerCamelCase_ )
UpperCamelCase_ = tokenizer.build_inputs_with_special_tokens(lowerCamelCase_ , lowerCamelCase_ )
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class _a ( lowerCamelCase_ , unittest.TestCase ):
"""simple docstring"""
A_ = BertJapaneseTokenizer
A_ = False
def _UpperCAmelCase ( self ) -> str:
super().setUp()
UpperCamelCase_ = ['[UNK]', '[CLS]', '[SEP]', 'こ', 'ん', 'に', 'ち', 'は', 'ば', '世', '界', '、', '。']
UpperCamelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
def _UpperCAmelCase ( self , **_UpperCAmelCase ) -> str:
return BertJapaneseTokenizer.from_pretrained(self.tmpdirname , subword_tokenizer_type='character' , **lowerCamelCase_ )
def _UpperCAmelCase ( self , _UpperCAmelCase ) -> Optional[Any]:
UpperCamelCase_ = 'こんにちは、世界。 \nこんばんは、世界。'
UpperCamelCase_ = 'こ ん に ち は 、 世 界 。 こ ん ば ん は 、 世 界 。'
return input_text, output_text
def _UpperCAmelCase ( self ) -> Tuple:
pass # TODO add if relevant
def _UpperCAmelCase ( self ) -> Optional[Any]:
pass # TODO add if relevant
def _UpperCAmelCase ( self ) -> str:
pass # TODO add if relevant
def _UpperCAmelCase ( self ) -> Optional[Any]:
UpperCamelCase_ = self.tokenizer_class(self.vocab_file , subword_tokenizer_type='character' )
UpperCamelCase_ = tokenizer.tokenize('こんにちは、世界。 \nこんばんは、世界。' )
self.assertListEqual(
lowerCamelCase_ , ['こ', 'ん', 'に', 'ち', 'は', '、', '世', '界', '。', 'こ', 'ん', 'ば', 'ん', 'は', '、', '世', '界', '。'] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCamelCase_ ) , [3, 4, 5, 6, 7, 11, 9, 10, 12, 3, 4, 8, 4, 7, 11, 9, 10, 12] )
def _UpperCAmelCase ( self ) -> Optional[int]:
UpperCamelCase_ = ['[UNK]', '[CLS]', '[SEP]', 'こ', 'ん', 'に', 'ち', 'は', 'ば', '世', '界', '、', '。']
UpperCamelCase_ = {}
for i, token in enumerate(lowerCamelCase_ ):
UpperCamelCase_ = i
UpperCamelCase_ = CharacterTokenizer(vocab=lowerCamelCase_ , unk_token='[UNK]' )
self.assertListEqual(tokenizer.tokenize('' ) , [] )
self.assertListEqual(tokenizer.tokenize('こんにちは' ) , ['こ', 'ん', 'に', 'ち', 'は'] )
self.assertListEqual(tokenizer.tokenize('こんにちほ' ) , ['こ', 'ん', 'に', 'ち', '[UNK]'] )
def _UpperCAmelCase ( self ) -> Union[str, Any]:
UpperCamelCase_ = self.tokenizer_class.from_pretrained('cl-tohoku/bert-base-japanese-char' )
UpperCamelCase_ = tokenizer.encode('ありがとう。' , add_special_tokens=lowerCamelCase_ )
UpperCamelCase_ = tokenizer.encode('どういたしまして。' , add_special_tokens=lowerCamelCase_ )
UpperCamelCase_ = tokenizer.build_inputs_with_special_tokens(lowerCamelCase_ )
UpperCamelCase_ = tokenizer.build_inputs_with_special_tokens(lowerCamelCase_ , lowerCamelCase_ )
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class _a ( unittest.TestCase ):
"""simple docstring"""
def _UpperCAmelCase ( self ) -> Union[str, Any]:
UpperCamelCase_ = 'cl-tohoku/bert-base-japanese'
UpperCamelCase_ = AutoTokenizer.from_pretrained(lowerCamelCase_ )
self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_ )
class _a ( unittest.TestCase ):
"""simple docstring"""
def _UpperCAmelCase ( self ) -> Optional[int]:
UpperCamelCase_ = 'cl-tohoku/bert-base-japanese'
with self.assertLogs('transformers' , level='WARNING' ) as cm:
BertTokenizer.from_pretrained(lowerCamelCase_ )
self.assertTrue(
cm.records[0].message.startswith(
'The tokenizer class you load from this checkpoint is not the same type as the class this function'
' is called from.' ) )
UpperCamelCase_ = 'bert-base-cased'
with self.assertLogs('transformers' , level='WARNING' ) as cm:
BertJapaneseTokenizer.from_pretrained(lowerCamelCase_ )
self.assertTrue(
cm.records[0].message.startswith(
'The tokenizer class you load from this checkpoint is not the same type as the class this function'
' is called from.' ) )
| 23 |
"""simple docstring"""
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
def calculate_prob( text ):
    """simple docstring"""
    single_char_strings , two_char_strings = analyze_text(text )
    my_alphas = list(''' ''' + ascii_lowercase )
    # what is our total sum of probabilities.
    all_sum = sum(single_char_strings.values() )
    # one length string
    my_fir_sum = 0
    # for each alpha we go in our dict and if it is in it we calculate entropy
    for ch in my_alphas:
        if ch in single_char_strings:
            my_str = single_char_strings[ch]
            prob = my_str / all_sum
            my_fir_sum += prob * math.log2(prob )  # entropy formula.
    # print entropy
    print(f'{round(-1 * my_fir_sum ):.1f}' )
    # two len string
    all_sum = sum(two_char_strings.values() )
    my_sec_sum = 0
    # for each alpha (two in size) calculate entropy.
    for cha in my_alphas:
        for chb in my_alphas:
            sequence = cha + chb
            if sequence in two_char_strings:
                my_str = two_char_strings[sequence]
                prob = int(my_str ) / all_sum
                my_sec_sum += prob * math.log2(prob )
    # print second entropy
    print(f'{round(-1 * my_sec_sum ):.1f}' )
    # print the difference between them
    print(f'{round((-1 * my_sec_sum) - (-1 * my_fir_sum) ):.1f}' )
def analyze_text( text ):
    """simple docstring"""
    single_char_strings = Counter()  # type: ignore
    two_char_strings = Counter()  # type: ignore
    single_char_strings[text[-1]] += 1
    # first case when we have space at start.
    two_char_strings[" " + text[0]] += 1
    for i in range(0 ,len(text ) - 1 ):
        single_char_strings[text[i]] += 1
        two_char_strings[text[i : i + 2]] += 1
    return single_char_strings, two_char_strings
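# Worked example (illustrative, not part of the original module): for the text
# "ab", analyze_text returns Counter({"b": 1, "a": 1}) for single characters and
# Counter({" a": 1, "ab": 1}) for pairs, so the first-order entropy printed by
# calculate_prob is -(0.5 * log2(0.5) + 0.5 * log2(0.5)) = 1.0 bit.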
def main( ):
"""simple docstring"""
import doctest
doctest.testmod()
# text = (
# "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
# "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
# "jointure saw horrible. He private he on be imagine suppose. Fertile "
# "beloved evident through no service elderly is. Blind there if every no so "
# "at. Own neglected you preferred way sincerity delivered his attempted. To "
# "of message cottage windows do besides against uncivil. Delightful "
# "unreserved impossible few estimating men favourable see entreaties. She "
# "propriety immediate was improving. He or entrance humoured likewise "
# "moderate. Much nor game son say feel. Fat make met can must form into "
# "gate. Me we offending prevailed discovery. "
# )
# calculate_prob(text)
if __name__ == "__main__":
main() | 34 | 0 |
def _SCREAMING_SNAKE_CASE ( input_num : int) -> int:
    '''simple docstring'''
    if not isinstance(input_num , int):
        raise ValueError("Input must be an integer")
    if input_num <= 0:
        raise ValueError("Input must be positive")
    return sum(
        divisor for divisor in range(1 , input_num // 2 + 1) if input_num % divisor == 0)
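# Sanity checks added for illustration (hypothetical, not in the original file):
# the proper divisors of 12 are 1, 2, 3, 4 and 6, which sum to 16, while 1 has no
# proper divisor in range(1, 1), so its sum is 0.
assert _SCREAMING_SNAKE_CASE(12) == 16
assert _SCREAMING_SNAKE_CASE(1) == 0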
if __name__ == "__main__":
import doctest
doctest.testmod() | 557 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import DistilBertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.distilbert.modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
)
class snake_case_ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , lowerCamelCase_ , lowerCamelCase_=1_3 , lowerCamelCase_=7 , lowerCamelCase_=True , lowerCamelCase_=True , lowerCamelCase_=True , lowerCamelCase_=True , lowerCamelCase_=9_9 , lowerCamelCase_=3_2 , lowerCamelCase_=5 , lowerCamelCase_=4 , lowerCamelCase_=3_7 , lowerCamelCase_="gelu" , lowerCamelCase_=0.1 , lowerCamelCase_=0.1 , lowerCamelCase_=5_1_2 , lowerCamelCase_=1_6 , lowerCamelCase_=2 , lowerCamelCase_=0.02 , lowerCamelCase_=4 , ) -> Any:
UpperCamelCase = parent
UpperCamelCase = batch_size
UpperCamelCase = seq_length
UpperCamelCase = is_training
UpperCamelCase = use_attention_mask
UpperCamelCase = use_token_type_ids
UpperCamelCase = use_labels
UpperCamelCase = vocab_size
UpperCamelCase = hidden_size
UpperCamelCase = num_hidden_layers
UpperCamelCase = num_attention_heads
UpperCamelCase = intermediate_size
UpperCamelCase = hidden_act
UpperCamelCase = hidden_dropout_prob
UpperCamelCase = attention_probs_dropout_prob
UpperCamelCase = max_position_embeddings
UpperCamelCase = type_vocab_size
UpperCamelCase = type_sequence_label_size
UpperCamelCase = initializer_range
UpperCamelCase = num_choices
def UpperCAmelCase__ ( self) -> Optional[Any]:
UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
UpperCamelCase = None
if self.use_attention_mask:
UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length])
UpperCamelCase = DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , tie_weights_=lowerCamelCase_ , )
return config, input_ids, attention_mask
def UpperCAmelCase__ ( self) -> str:
UpperCamelCase = self.prepare_config_and_inputs()
UpperCamelCase , UpperCamelCase , UpperCamelCase = config_and_inputs
UpperCamelCase = {'''input_ids''': input_ids, '''attention_mask''': attention_mask}
return config, inputs_dict
@require_flax
class snake_case_ ( lowerCamelCase_ , unittest.TestCase ):
"""simple docstring"""
A_ = (
(
FlaxDistilBertModel,
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertForQuestionAnswering,
)
if is_flax_available()
else ()
)
def UpperCAmelCase__ ( self) -> List[str]:
UpperCamelCase = FlaxDistilBertModelTester(self)
@slow
def UpperCAmelCase__ ( self) -> Dict:
for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained('''distilbert-base-uncased''')
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class snake_case_ ( unittest.TestCase ):
"""simple docstring"""
@slow
def UpperCAmelCase__ ( self) -> Optional[int]:
UpperCamelCase = FlaxDistilBertModel.from_pretrained('''distilbert-base-uncased''')
UpperCamelCase = np.array([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]])
UpperCamelCase = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
UpperCamelCase = model(lowerCamelCase_ , attention_mask=lowerCamelCase_)[0]
UpperCamelCase = (1, 1_1, 7_6_8)
self.assertEqual(output.shape , lowerCamelCase_)
UpperCamelCase = np.array([[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]])
self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , lowerCamelCase_ , atol=1e-4)) | 34 | 0 |
import gc
import unittest
from parameterized import parameterized
from diffusers import FlaxUNetaDConditionModel
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
@slow
@require_flax
class UpperCamelCase ( unittest.TestCase ):
    def __A ( self , seed , shape ):
        return F"""gaussian_noise_s={seed}_shape={"_".join([str(s ) for s in shape] )}.npy"""
def __A ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def __A ( self , UpperCAmelCase__=0 , UpperCAmelCase__=(4, 4, 64, 64) , UpperCAmelCase__=False ):
A__ = jnp.bfloataa if fpaa else jnp.floataa
A__ = jnp.array(load_hf_numpy(self.get_file_format(lowerCamelCase_ , lowerCamelCase_ ) ) , dtype=lowerCamelCase_ )
return image
def __A ( self , UpperCAmelCase__=False , UpperCAmelCase__="CompVis/stable-diffusion-v1-4" ):
A__ = jnp.bfloataa if fpaa else jnp.floataa
A__ = "bf16" if fpaa else None
A__ , A__ = FlaxUNetaDConditionModel.from_pretrained(
lowerCamelCase_ , subfolder="unet" , dtype=lowerCamelCase_ , revision=lowerCamelCase_ )
return model, params
def __A ( self , UpperCAmelCase__=0 , UpperCAmelCase__=(4, 77, 768) , UpperCAmelCase__=False ):
A__ = jnp.bfloataa if fpaa else jnp.floataa
A__ = jnp.array(load_hf_numpy(self.get_file_format(lowerCamelCase_ , lowerCamelCase_ ) ) , dtype=lowerCamelCase_ )
return hidden_states
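    # Each parameterized case below is (seed, timestep, expected_slice): the test
    # denoises fixed latents at the given timestep and checks a slice of the output
    # against values recorded from the equivalent PyTorch float16 run (see the
    # tolerance comments inside the test bodies).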
@parameterized.expand(
[
# fmt: off
[83, 4, [-0.2_323, -0.1_304, 0.0_813, -0.3_093, -0.0_919, -0.1_571, -0.1_125, -0.5_806]],
[17, 0.55, [-0.0_831, -0.2_443, 0.0_901, -0.0_919, 0.3_396, 0.0_103, -0.3_743, 0.0_701]],
[8, 0.89, [-0.4_863, 0.0_859, 0.0_875, -0.1_658, 0.9_199, -0.0_114, 0.4_839, 0.4_639]],
[3, 1_000, [-0.5_649, 0.2_402, -0.5_518, 0.1_248, 1.1_328, -0.2_443, -0.0_325, -1.0_078]],
# fmt: on
] )
def __A ( self , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ):
A__ , A__ = self.get_unet_model(model_id="CompVis/stable-diffusion-v1-4" , fpaa=lowerCamelCase_ )
A__ = self.get_latents(lowerCamelCase_ , fpaa=lowerCamelCase_ )
A__ = self.get_encoder_hidden_states(lowerCamelCase_ , fpaa=lowerCamelCase_ )
A__ = model.apply(
{"params": params} , lowerCamelCase_ , jnp.array(lowerCamelCase_ , dtype=jnp.intaa ) , encoder_hidden_states=lowerCamelCase_ , ).sample
assert sample.shape == latents.shape
A__ = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa )
A__ = jnp.array(lowerCamelCase_ , dtype=jnp.floataa )
# Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, in the same hardware
assert jnp.allclose(lowerCamelCase_ , lowerCamelCase_ , atol=1e-2 )
@parameterized.expand(
[
# fmt: off
[83, 4, [0.1_514, 0.0_807, 0.1_624, 0.1_016, -0.1_896, 0.0_263, 0.0_677, 0.2_310]],
[17, 0.55, [0.1_164, -0.0_216, 0.0_170, 0.1_589, -0.3_120, 0.1_005, -0.0_581, -0.1_458]],
[8, 0.89, [-0.1_758, -0.0_169, 0.1_004, -0.1_411, 0.1_312, 0.1_103, -0.1_996, 0.2_139]],
[3, 1_000, [0.1_214, 0.0_352, -0.0_731, -0.1_562, -0.0_994, -0.0_906, -0.2_340, -0.0_539]],
# fmt: on
] )
def __A ( self , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ):
A__ , A__ = self.get_unet_model(model_id="stabilityai/stable-diffusion-2" , fpaa=lowerCamelCase_ )
A__ = self.get_latents(lowerCamelCase_ , shape=(4, 4, 96, 96) , fpaa=lowerCamelCase_ )
A__ = self.get_encoder_hidden_states(lowerCamelCase_ , shape=(4, 77, 1_024) , fpaa=lowerCamelCase_ )
A__ = model.apply(
{"params": params} , lowerCamelCase_ , jnp.array(lowerCamelCase_ , dtype=jnp.intaa ) , encoder_hidden_states=lowerCamelCase_ , ).sample
assert sample.shape == latents.shape
A__ = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa )
A__ = jnp.array(lowerCamelCase_ , dtype=jnp.floataa )
# Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
assert jnp.allclose(lowerCamelCase_ , lowerCamelCase_ , atol=1e-2 )
| 491 |
"""simple docstring"""
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
@add_end_docstrings(lowerCamelCase_ )
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
def __init__( self , **lowerCamelCase_) -> Tuple:
super().__init__(**lowerCamelCase_)
requires_backends(self , '''vision''')
self.check_model_type(
TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if self.framework == '''tf'''
else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING)
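    # Usage sketch (illustrative; the model name is an assumption, any CLIP-style
    # checkpoint works):
    #   classifier = pipeline("zero-shot-image-classification", model="openai/clip-vit-base-patch32")
    #   classifier("cat.png", candidate_labels=["cat", "dog"],
    #              hypothesis_template="This is a photo of {}.")
    #   -> [{"score": ..., "label": "cat"}, {"score": ..., "label": "dog"}]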
def __call__( self , lowerCamelCase_ , **lowerCamelCase_) -> Optional[int]:
return super().__call__(lowerCamelCase_ , **lowerCamelCase_)
def UpperCAmelCase__ ( self , **lowerCamelCase_) -> Any:
UpperCamelCase = {}
if "candidate_labels" in kwargs:
UpperCamelCase = kwargs['''candidate_labels''']
if "hypothesis_template" in kwargs:
UpperCamelCase = kwargs['''hypothesis_template''']
return preprocess_params, {}, {}
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_=None , lowerCamelCase_="This is a photo of {}.") -> Union[str, Any]:
UpperCamelCase = load_image(lowerCamelCase_)
UpperCamelCase = self.image_processor(images=[image] , return_tensors=self.framework)
UpperCamelCase = candidate_labels
UpperCamelCase = [hypothesis_template.format(lowerCamelCase_) for x in candidate_labels]
UpperCamelCase = self.tokenizer(lowerCamelCase_ , return_tensors=self.framework , padding=lowerCamelCase_)
UpperCamelCase = [text_inputs]
return inputs
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Any:
UpperCamelCase = model_inputs.pop('''candidate_labels''')
UpperCamelCase = model_inputs.pop('''text_inputs''')
if isinstance(text_inputs[0] , lowerCamelCase_):
UpperCamelCase = text_inputs[0]
else:
# Batching case.
UpperCamelCase = text_inputs[0][0]
UpperCamelCase = self.model(**lowerCamelCase_ , **lowerCamelCase_)
UpperCamelCase = {
'''candidate_labels''': candidate_labels,
'''logits''': outputs.logits_per_image,
}
return model_outputs
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Any:
UpperCamelCase = model_outputs.pop('''candidate_labels''')
UpperCamelCase = model_outputs['''logits'''][0]
if self.framework == "pt":
UpperCamelCase = logits.softmax(dim=-1).squeeze(-1)
UpperCamelCase = probs.tolist()
if not isinstance(lowerCamelCase_ , lowerCamelCase_):
UpperCamelCase = [scores]
elif self.framework == "tf":
UpperCamelCase = stable_softmax(lowerCamelCase_ , axis=-1)
UpperCamelCase = probs.numpy().tolist()
else:
raise ValueError(F'Unsupported framework: {self.framework}')
UpperCamelCase = [
{'''score''': score, '''label''': candidate_label}
            for score, candidate_label in sorted(zip(lowerCamelCase_ , lowerCamelCase_) , key=lambda x: -x[0])
]
return result | 34 | 0 |
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class _lowercase ( lowerCamelCase_ ):
"""simple docstring"""
lowerCAmelCase__ = ['image_processor', 'tokenizer']
lowerCAmelCase__ = 'AutoImageProcessor'
lowerCAmelCase__ = 'AutoTokenizer'
def __init__( self , UpperCAmelCase , UpperCAmelCase ):
'''simple docstring'''
super().__init__(lowerCamelCase_ , lowerCamelCase_ )
_lowercase = self.image_processor
def __call__( self , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=None , **UpperCAmelCase ):
'''simple docstring'''
if text is None and images is None:
raise ValueError("""You have to specify either text or images. Both cannot be none.""" )
if text is not None:
_lowercase = self.tokenizer(lowerCamelCase_ , return_tensors=lowerCamelCase_ , **lowerCamelCase_ )
if images is not None:
_lowercase = self.image_processor(lowerCamelCase_ , return_tensors=lowerCamelCase_ , **lowerCamelCase_ )
if text is not None and images is not None:
_lowercase = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**lowerCamelCase_ ) , tensor_type=lowerCamelCase_ )
def _UpperCAmelCase ( self , *UpperCAmelCase , **UpperCAmelCase ):
'''simple docstring'''
return self.tokenizer.batch_decode(*lowerCamelCase_ , **lowerCamelCase_ )
def _UpperCAmelCase ( self , *UpperCAmelCase , **UpperCAmelCase ):
'''simple docstring'''
return self.tokenizer.decode(*lowerCamelCase_ , **lowerCamelCase_ )
@property
def _UpperCAmelCase ( self ):
'''simple docstring'''
return ["input_ids", "attention_mask", "pixel_values"]
| 398 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class snake_case_ ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , unittest.TestCase ):
"""simple docstring"""
A_ = StableDiffusionInpaintPipeline
A_ = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
A_ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
A_ = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
A_ = frozenset([] )
def UpperCAmelCase__ ( self) -> List[Any]:
torch.manual_seed(0)
UpperCamelCase = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=9 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=3_2 , attention_head_dim=(2, 4) , use_linear_projection=lowerCamelCase_ , )
UpperCamelCase = PNDMScheduler(skip_prk_steps=lowerCamelCase_)
torch.manual_seed(0)
UpperCamelCase = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , sample_size=1_2_8 , )
torch.manual_seed(0)
UpperCamelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , hidden_act='''gelu''' , projection_dim=5_1_2 , )
UpperCamelCase = CLIPTextModel(lowerCamelCase_)
UpperCamelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''')
UpperCamelCase = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_=0) -> Dict:
# TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched
UpperCamelCase = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(lowerCamelCase_)).to(lowerCamelCase_)
UpperCamelCase = image.cpu().permute(0 , 2 , 3 , 1)[0]
UpperCamelCase = Image.fromarray(np.uinta(lowerCamelCase_)).convert('''RGB''').resize((6_4, 6_4))
UpperCamelCase = Image.fromarray(np.uinta(image + 4)).convert('''RGB''').resize((6_4, 6_4))
if str(lowerCamelCase_).startswith('''mps'''):
UpperCamelCase = torch.manual_seed(lowerCamelCase_)
else:
UpperCamelCase = torch.Generator(device=lowerCamelCase_).manual_seed(lowerCamelCase_)
UpperCamelCase = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': init_image,
'''mask_image''': mask_image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
}
return inputs
def UpperCAmelCase__ ( self) -> Optional[Any]:
UpperCamelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator
UpperCamelCase = self.get_dummy_components()
UpperCamelCase = StableDiffusionInpaintPipeline(**lowerCamelCase_)
UpperCamelCase = sd_pipe.to(lowerCamelCase_)
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_)
UpperCamelCase = self.get_dummy_inputs(lowerCamelCase_)
UpperCamelCase = sd_pipe(**lowerCamelCase_).images
UpperCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
UpperCamelCase = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
def UpperCAmelCase__ ( self) -> Union[str, Any]:
super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class snake_case_ ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase__ ( self) -> int:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase__ ( self) -> List[Any]:
UpperCamelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-inpaint/init_image.png''')
UpperCamelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''')
UpperCamelCase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'''
'''/yellow_cat_sitting_on_a_park_bench.npy''')
UpperCamelCase = '''stabilityai/stable-diffusion-2-inpainting'''
UpperCamelCase = StableDiffusionInpaintPipeline.from_pretrained(lowerCamelCase_ , safety_checker=lowerCamelCase_)
pipe.to(lowerCamelCase_)
pipe.set_progress_bar_config(disable=lowerCamelCase_)
pipe.enable_attention_slicing()
UpperCamelCase = '''Face of a yellow cat, high resolution, sitting on a park bench'''
UpperCamelCase = torch.manual_seed(0)
UpperCamelCase = pipe(
prompt=lowerCamelCase_ , image=lowerCamelCase_ , mask_image=lowerCamelCase_ , generator=lowerCamelCase_ , output_type='''np''' , )
UpperCamelCase = output.images[0]
assert image.shape == (5_1_2, 5_1_2, 3)
assert np.abs(expected_image - image).max() < 9e-3
def UpperCAmelCase__ ( self) -> Optional[Any]:
UpperCamelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-inpaint/init_image.png''')
UpperCamelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''')
UpperCamelCase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'''
'''/yellow_cat_sitting_on_a_park_bench_fp16.npy''')
UpperCamelCase = '''stabilityai/stable-diffusion-2-inpainting'''
UpperCamelCase = StableDiffusionInpaintPipeline.from_pretrained(
lowerCamelCase_ , torch_dtype=torch.floataa , safety_checker=lowerCamelCase_ , )
pipe.to(lowerCamelCase_)
pipe.set_progress_bar_config(disable=lowerCamelCase_)
pipe.enable_attention_slicing()
UpperCamelCase = '''Face of a yellow cat, high resolution, sitting on a park bench'''
UpperCamelCase = torch.manual_seed(0)
UpperCamelCase = pipe(
prompt=lowerCamelCase_ , image=lowerCamelCase_ , mask_image=lowerCamelCase_ , generator=lowerCamelCase_ , output_type='''np''' , )
UpperCamelCase = output.images[0]
assert image.shape == (5_1_2, 5_1_2, 3)
assert np.abs(expected_image - image).max() < 5e-1
def UpperCAmelCase__ ( self) -> List[str]:
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
UpperCamelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-inpaint/init_image.png''')
UpperCamelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''')
UpperCamelCase = '''stabilityai/stable-diffusion-2-inpainting'''
UpperCamelCase = PNDMScheduler.from_pretrained(lowerCamelCase_ , subfolder='''scheduler''')
UpperCamelCase = StableDiffusionInpaintPipeline.from_pretrained(
lowerCamelCase_ , safety_checker=lowerCamelCase_ , scheduler=lowerCamelCase_ , torch_dtype=torch.floataa , )
pipe.to(lowerCamelCase_)
pipe.set_progress_bar_config(disable=lowerCamelCase_)
pipe.enable_attention_slicing(1)
pipe.enable_sequential_cpu_offload()
UpperCamelCase = '''Face of a yellow cat, high resolution, sitting on a park bench'''
UpperCamelCase = torch.manual_seed(0)
UpperCamelCase = pipe(
prompt=lowerCamelCase_ , image=lowerCamelCase_ , mask_image=lowerCamelCase_ , generator=lowerCamelCase_ , num_inference_steps=2 , output_type='''np''' , )
UpperCamelCase = torch.cuda.max_memory_allocated()
# make sure that less than 2.65 GB is allocated
        assert mem_bytes < 2.65 * 1_0**9
| 34 | 0 |
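# --- Editor's sketch (added): the slow test above reduced to a standalone
# inpainting call. Checkpoint and image URLs are taken from the test itself;
# assumes a CUDA GPU and network access.
import torch
from diffusers import StableDiffusionInpaintPipeline
from diffusers.utils import load_image

pipe = StableDiffusionInpaintPipeline.from_pretrained(
    "stabilityai/stable-diffusion-2-inpainting", torch_dtype=torch.float16
).to("cuda")
init_image = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/init_image.png"
)
mask_image = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
)
prompt = "Face of a yellow cat, high resolution, sitting on a park bench"
image = pipe(prompt=prompt, image=init_image, mask_image=mask_image).images[0]
image.save("inpainted.png")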
'''simple docstring'''
from typing import Dict
import numpy as np
import torch
from . import residue_constants as rc
from .tensor_utils import tensor_tree_map, tree_map
def make_atom14_masks(protein: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
    """Construct denser atom positions (14 dimensions instead of 37)."""
    restype_atom14_to_atom37_list = []
    restype_atom37_to_atom14_list = []
    restype_atom14_mask_list = []

    for rt in rc.restypes:
        atom_names = rc.restype_name_to_atom14_names[rc.restype_1to3[rt]]
        restype_atom14_to_atom37_list.append([(rc.atom_order[name] if name else 0) for name in atom_names])
        atom_name_to_idx14 = {name: i for i, name in enumerate(atom_names)}
        restype_atom37_to_atom14_list.append(
            [(atom_name_to_idx14[name] if name in atom_name_to_idx14 else 0) for name in rc.atom_types]
        )
        restype_atom14_mask_list.append([(1.0 if name else 0.0) for name in atom_names])

    # Add dummy mapping for restype 'UNK'
    restype_atom14_to_atom37_list.append([0] * 14)
    restype_atom37_to_atom14_list.append([0] * 37)
    restype_atom14_mask_list.append([0.0] * 14)

    restype_atom14_to_atom37 = torch.tensor(
        restype_atom14_to_atom37_list, dtype=torch.int32, device=protein["aatype"].device
    )
    restype_atom37_to_atom14 = torch.tensor(
        restype_atom37_to_atom14_list, dtype=torch.int32, device=protein["aatype"].device
    )
    restype_atom14_mask = torch.tensor(
        restype_atom14_mask_list, dtype=torch.float32, device=protein["aatype"].device
    )
    protein_aatype = protein["aatype"].to(torch.long)

    # create the mapping for (residx, atom14) --> atom37, i.e. an array
    # with shape (num_res, 14) containing the atom37 indices for this protein
    residx_atom14_to_atom37 = restype_atom14_to_atom37[protein_aatype]
    residx_atom14_mask = restype_atom14_mask[protein_aatype]
    protein["atom14_atom_exists"] = residx_atom14_mask
    protein["residx_atom14_to_atom37"] = residx_atom14_to_atom37.long()

    # create the gather indices for mapping back
    residx_atom37_to_atom14 = restype_atom37_to_atom14[protein_aatype]
    protein["residx_atom37_to_atom14"] = residx_atom37_to_atom14.long()

    # create the corresponding mask
    restype_atom37_mask = torch.zeros([21, 37], dtype=torch.float32, device=protein["aatype"].device)
    for restype, restype_letter in enumerate(rc.restypes):
        restype_name = rc.restype_1to3[restype_letter]
        atom_names = rc.residue_atoms[restype_name]
        for atom_name in atom_names:
            atom_type = rc.atom_order[atom_name]
            restype_atom37_mask[restype, atom_type] = 1

    residx_atom37_mask = restype_atom37_mask[protein_aatype]
    protein["atom37_atom_exists"] = residx_atom37_mask

    return protein


def make_atom14_masks_np(batch: Dict[str, torch.Tensor]) -> Dict[str, np.ndarray]:
    batch = tree_map(lambda n: torch.tensor(n, device=batch["aatype"].device), batch, np.ndarray)
    out = tensor_tree_map(lambda t: np.array(t), make_atom14_masks(batch))
    return out
| 150 |
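# --- Editor's sketch (added): the core indexing pattern in make_atom14_masks
# above is a per-residue row gather from a (num_restypes, num_atoms) lookup
# table. Toy numbers, unrelated to the real residue constants.
import torch

table = torch.arange(21 * 14).reshape(21, 14)  # stand-in for restype_atom14_to_atom37
aatype = torch.tensor([0, 7, 20])              # three residues (20 = the 'UNK' row)
per_residue = table[aatype]                    # shape (3, 14): one atom map per residue
assert per_residue.shape == (3, 14)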
"""simple docstring"""
import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch
import pyarrow as pa
import pytest
import requests
from packaging import version
from datasets import config
if config.PY_VERSION < version.parse('3.8'):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
def __snake_case ( _lowercase ,_lowercase=False ):
"""simple docstring"""
try:
UpperCamelCase = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
UpperCamelCase = default
else:
# KEY is set, convert it to True or False.
try:
UpperCamelCase = strtobool(_lowercase )
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(f'If set, {key} must be yes or no.' )
return _value
SCREAMING_SNAKE_CASE_ = parse_flag_from_env('RUN_SLOW', default=False)
SCREAMING_SNAKE_CASE_ = parse_flag_from_env('RUN_REMOTE', default=False)
SCREAMING_SNAKE_CASE_ = parse_flag_from_env('RUN_LOCAL', default=True)
SCREAMING_SNAKE_CASE_ = parse_flag_from_env('RUN_PACKAGED', default=True)
# Compression
SCREAMING_SNAKE_CASE_ = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason='test requires lz4')
SCREAMING_SNAKE_CASE_ = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason='test requires py7zr')
SCREAMING_SNAKE_CASE_ = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason='test requires zstandard')
# Audio
SCREAMING_SNAKE_CASE_ = pytest.mark.skipif(
# On Windows and OS X, soundfile installs sndfile
find_spec('soundfile') is None or version.parse(importlib_metadata.version('soundfile')) < version.parse('0.12.0'),
reason='test requires sndfile>=0.12.1: \'pip install \"soundfile>=0.12.1\"\'; ',
)
# Beam
SCREAMING_SNAKE_CASE_ = pytest.mark.skipif(
not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse('0.3.2'),
reason='test requires apache-beam and a compatible dill version',
)
# Dill-cloudpickle compatibility
SCREAMING_SNAKE_CASE_ = pytest.mark.skipif(
config.DILL_VERSION <= version.parse('0.3.2'),
reason='test requires dill>0.3.2 for cloudpickle compatibility',
)
# Windows
SCREAMING_SNAKE_CASE_ = pytest.mark.skipif(
sys.platform == 'win32',
reason='test should not be run on Windows',
)
def __snake_case ( _lowercase ):
"""simple docstring"""
try:
import faiss # noqa
except ImportError:
UpperCamelCase = unittest.skip('''test requires faiss''' )(_lowercase )
return test_case
def __snake_case ( _lowercase ):
"""simple docstring"""
try:
import regex # noqa
except ImportError:
UpperCamelCase = unittest.skip('''test requires regex''' )(_lowercase )
return test_case
def __snake_case ( _lowercase ):
"""simple docstring"""
try:
import elasticsearch # noqa
except ImportError:
UpperCamelCase = unittest.skip('''test requires elasticsearch''' )(_lowercase )
return test_case
def __snake_case ( _lowercase ):
"""simple docstring"""
try:
import sqlalchemy # noqa
except ImportError:
UpperCamelCase = unittest.skip('''test requires sqlalchemy''' )(_lowercase )
return test_case
def __snake_case ( _lowercase ):
"""simple docstring"""
if not config.TORCH_AVAILABLE:
UpperCamelCase = unittest.skip('''test requires PyTorch''' )(_lowercase )
return test_case
def __snake_case ( _lowercase ):
"""simple docstring"""
if not config.TF_AVAILABLE:
UpperCamelCase = unittest.skip('''test requires TensorFlow''' )(_lowercase )
return test_case
def __snake_case ( _lowercase ):
"""simple docstring"""
if not config.JAX_AVAILABLE:
UpperCamelCase = unittest.skip('''test requires JAX''' )(_lowercase )
return test_case
def __snake_case ( _lowercase ):
"""simple docstring"""
if not config.PIL_AVAILABLE:
UpperCamelCase = unittest.skip('''test requires Pillow''' )(_lowercase )
return test_case
def __snake_case ( _lowercase ):
"""simple docstring"""
try:
import transformers # noqa F401
except ImportError:
return unittest.skip('''test requires transformers''' )(_lowercase )
else:
return test_case
def __snake_case ( _lowercase ):
"""simple docstring"""
try:
import tiktoken # noqa F401
except ImportError:
return unittest.skip('''test requires tiktoken''' )(_lowercase )
else:
return test_case
def __snake_case ( _lowercase ):
"""simple docstring"""
try:
import spacy # noqa F401
except ImportError:
return unittest.skip('''test requires spacy''' )(_lowercase )
else:
return test_case
def __snake_case ( _lowercase ):
"""simple docstring"""
def _require_spacy_model(_lowercase ):
try:
import spacy # noqa F401
spacy.load(_lowercase )
except ImportError:
return unittest.skip('''test requires spacy''' )(_lowercase )
except OSError:
return unittest.skip('''test requires spacy model \'{}\''''.format(_lowercase ) )(_lowercase )
else:
return test_case
return _require_spacy_model
def __snake_case ( _lowercase ):
"""simple docstring"""
try:
import pyspark # noqa F401
except ImportError:
return unittest.skip('''test requires pyspark''' )(_lowercase )
else:
return test_case
def __snake_case ( _lowercase ):
"""simple docstring"""
try:
import joblibspark # noqa F401
except ImportError:
return unittest.skip('''test requires joblibspark''' )(_lowercase )
else:
return test_case
def __snake_case ( _lowercase ):
"""simple docstring"""
if not _run_slow_tests or _run_slow_tests == 0:
UpperCamelCase = unittest.skip('''test is slow''' )(_lowercase )
return test_case
def __snake_case ( _lowercase ):
"""simple docstring"""
if not _run_local_tests or _run_local_tests == 0:
UpperCamelCase = unittest.skip('''test is local''' )(_lowercase )
return test_case
def __snake_case ( _lowercase ):
"""simple docstring"""
if not _run_packaged_tests or _run_packaged_tests == 0:
UpperCamelCase = unittest.skip('''test is packaged''' )(_lowercase )
return test_case
def __snake_case ( _lowercase ):
"""simple docstring"""
if not _run_remote_tests or _run_remote_tests == 0:
UpperCamelCase = unittest.skip('''test requires remote''' )(_lowercase )
return test_case
def __snake_case ( *_lowercase ):
"""simple docstring"""
def decorate(cls ):
for name, fn in cls.__dict__.items():
if callable(_lowercase ) and name.startswith('''test''' ):
for decorator in decorators:
UpperCamelCase = decorator(_lowercase )
setattr(cls ,_lowercase ,_lowercase )
return cls
return decorate
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
pass
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
A_ = 0
A_ = 1
A_ = 2
@contextmanager
def __snake_case ( _lowercase=OfflineSimulationMode.CONNECTION_FAILS ,_lowercase=1e-16 ):
"""simple docstring"""
UpperCamelCase = requests.Session().request
def timeout_request(_lowercase ,_lowercase ,_lowercase ,**_lowercase ):
# Change the url to an invalid url so that the connection hangs
UpperCamelCase = '''https://10.255.255.1'''
if kwargs.get('''timeout''' ) is None:
raise RequestWouldHangIndefinitelyError(
f'Tried a call to {url} in offline mode with no timeout set. Please set a timeout.' )
UpperCamelCase = timeout
try:
return online_request(_lowercase ,_lowercase ,**_lowercase )
except Exception as e:
# The following changes in the error are just here to make the offline timeout error prettier
UpperCamelCase = url
UpperCamelCase = e.args[0]
UpperCamelCase = (max_retry_error.args[0].replace('''10.255.255.1''' ,f'OfflineMock[{url}]' ),)
UpperCamelCase = (max_retry_error,)
raise
def raise_connection_error(_lowercase ,_lowercase ,**_lowercase ):
raise requests.ConnectionError('''Offline mode is enabled.''' ,request=_lowercase )
if mode is OfflineSimulationMode.CONNECTION_FAILS:
with patch('''requests.Session.send''' ,_lowercase ):
yield
elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
# inspired from https://stackoverflow.com/a/904609
with patch('''requests.Session.request''' ,_lowercase ):
yield
elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
with patch('''datasets.config.HF_DATASETS_OFFLINE''' ,_lowercase ):
yield
else:
raise ValueError('''Please use a value from the OfflineSimulationMode enum.''' )
@contextmanager
def __snake_case ( *_lowercase ,**_lowercase ):
"""simple docstring"""
UpperCamelCase = str(Path().resolve() )
with tempfile.TemporaryDirectory(*_lowercase ,**_lowercase ) as tmp_dir:
try:
os.chdir(_lowercase )
yield
finally:
os.chdir(_lowercase )
@contextmanager
def __snake_case ( ):
"""simple docstring"""
import gc
gc.collect()
UpperCamelCase = pa.total_allocated_bytes()
yield
assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."
@contextmanager
def __snake_case ( ):
"""simple docstring"""
import gc
gc.collect()
UpperCamelCase = pa.total_allocated_bytes()
yield
assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."
def __snake_case ( _lowercase ,_lowercase ):
"""simple docstring"""
return deepcopy(_lowercase ).integers(0 ,100 ,10 ).tolist() == deepcopy(_lowercase ).integers(0 ,100 ,10 ).tolist()
def __snake_case ( _lowercase ):
"""simple docstring"""
import decorator
from requests.exceptions import HTTPError
def _wrapper(_lowercase ,*_lowercase ,**_lowercase ):
try:
return func(*_lowercase ,**_lowercase )
except HTTPError as err:
if str(_lowercase ).startswith('''500''' ) or str(_lowercase ).startswith('''502''' ):
pytest.xfail(str(_lowercase ) )
raise err
return decorator.decorator(_wrapper ,_lowercase )
class snake_case_ :
"""simple docstring"""
def __init__( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> Dict:
UpperCamelCase = returncode
UpperCamelCase = stdout
UpperCamelCase = stderr
async def __snake_case ( _lowercase ,_lowercase ):
"""simple docstring"""
while True:
UpperCamelCase = await stream.readline()
if line:
callback(_lowercase )
else:
break
async def __snake_case ( _lowercase ,_lowercase=None ,_lowercase=None ,_lowercase=None ,_lowercase=False ,_lowercase=False ):
"""simple docstring"""
if echo:
print('''\nRunning: ''' ,''' '''.join(_lowercase ) )
UpperCamelCase = await asyncio.create_subprocess_exec(
cmd[0] ,*cmd[1:] ,stdin=_lowercase ,stdout=asyncio.subprocess.PIPE ,stderr=asyncio.subprocess.PIPE ,env=_lowercase ,)
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
UpperCamelCase = []
UpperCamelCase = []
def tee(_lowercase ,_lowercase ,_lowercase ,_lowercase="" ):
UpperCamelCase = line.decode('''utf-8''' ).rstrip()
sink.append(_lowercase )
if not quiet:
print(_lowercase ,_lowercase ,file=_lowercase )
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
_read_stream(p.stdout ,lambda _lowercase : tee(_lowercase ,_lowercase ,sys.stdout ,label='''stdout:''' ) ),
_read_stream(p.stderr ,lambda _lowercase : tee(_lowercase ,_lowercase ,sys.stderr ,label='''stderr:''' ) ),
] ,timeout=_lowercase ,)
return _RunOutput(await p.wait() ,_lowercase ,_lowercase )
def __snake_case ( _lowercase ,_lowercase=None ,_lowercase=None ,_lowercase=180 ,_lowercase=False ,_lowercase=True ):
"""simple docstring"""
UpperCamelCase = asyncio.get_event_loop()
UpperCamelCase = loop.run_until_complete(
_stream_subprocess(_lowercase ,env=_lowercase ,stdin=_lowercase ,timeout=_lowercase ,quiet=_lowercase ,echo=_lowercase ) )
UpperCamelCase = ''' '''.join(_lowercase )
if result.returncode > 0:
UpperCamelCase = '''\n'''.join(result.stderr )
raise RuntimeError(
f'\'{cmd_str}\' failed with returncode {result.returncode}\n\n'
f'The combined stderr from workers follows:\n{stderr}' )
# check that the subprocess actually did run and produced some output, should the test rely on
# the remote side to do the testing
if not result.stdout and not result.stderr:
raise RuntimeError(f'\'{cmd_str}\' produced no output.' )
return result
def __snake_case ( ):
"""simple docstring"""
UpperCamelCase = os.environ.get('''PYTEST_XDIST_WORKER''' ,'''gw0''' )
UpperCamelCase = re.sub(r'''^gw''' ,'''''' ,_lowercase ,0 ,re.M )
return int(_lowercase )
def __snake_case ( ):
"""simple docstring"""
UpperCamelCase = 2_9500
UpperCamelCase = pytest_xdist_worker_id()
    return port + uniq_delta
| 34 | 0 |
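# --- Editor's sketch (added): how the env flags and decorators above combine
# in a test module. Decorator names follow the upstream library (they are
# mangled in this row), and the test body is illustrative only.
#
#   import unittest
#
#   class IndexTests(unittest.TestCase):
#       @slow           # skipped unless RUN_SLOW=yes (see parse_flag_from_env)
#       @require_faiss  # skipped when faiss is not importable
#       def test_build_index(self):
#           ...
#
#   RUN_SLOW=yes python -m pytest tests/test_index.py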
'''simple docstring'''
import requests
from bs4 import BeautifulSoup


def stock_price(symbol: str = "AAPL") -> str:
    """Scrape the current price for ``symbol`` from Yahoo Finance."""
    url = f"https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    class_ = "My(6px) Pos(r) smartphone_Mt(6px)"
    return soup.find("div", class_=class_).find("span").text
if __name__ == "__main__":
for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
print(f"""Current {symbol:<4} stock price is {stock_price(symbol):>8}""")
| 320 |
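# --- Editor's sketch (added): scrapers like the one above fail silently when
# the page layout or response changes; a timeout and an explicit status check
# surface that. The User-Agent value is an illustrative assumption.
import requests

resp = requests.get(
    "https://in.finance.yahoo.com/quote/AAPL?s=AAPL",
    headers={"User-Agent": "Mozilla/5.0"},
    timeout=10,
)
resp.raise_for_status()  # raise instead of parsing an error page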
"""simple docstring"""
import operator
def strand_sort(arr: list, reverse: bool = False, solution: list = None) -> list:
    """Strand sort: repeatedly peel an increasing strand off ``arr`` and merge it."""
    _operator = operator.lt if reverse else operator.gt
    solution = solution or []
    if not arr:
        return solution
    sublist = [arr.pop(0)]
    for i, item in enumerate(arr):
        if _operator(item, sublist[-1]):
            sublist.append(item)
            arr.pop(i)
    # merging sublist into solution list
    if not solution:
        solution.extend(sublist)
    else:
        while sublist:
            item = sublist.pop(0)
            for i, xx in enumerate(solution):
                if not _operator(item, xx):
                    solution.insert(i, item)
                    break
            else:
                solution.append(item)
    strand_sort(arr, reverse, solution)
    return solution
if __name__ == "__main__":
assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
    assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
| 34 | 0 |
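# --- Editor's sketch (added): strand sort works for any mutually comparable
# items; the first pass over [4, 3, 5, 1, 2] extracts the strand [4, 5].
print(strand_sort([4, 3, 5, 1, 2]))                 # [1, 2, 3, 4, 5]
print(strand_sort(["pear", "apple", "fig"], True))  # ['pear', 'fig', 'apple']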
'''simple docstring'''
import os
import unittest
from transformers import FunnelTokenizer, FunnelTokenizerFast
from transformers.models.funnel.tokenization_funnel import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class UpperCAmelCase__ ( lowerCamelCase_ , unittest.TestCase ):
lowerCAmelCase_ = FunnelTokenizer
lowerCAmelCase_ = FunnelTokenizerFast
lowerCAmelCase_ = True
lowerCAmelCase_ = True
def lowerCamelCase_ ( self : List[Any] ):
super().setUp()
_lowerCamelCase : Dict = [
"<unk>",
"<cls>",
"<sep>",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
_lowerCamelCase : Tuple = os.path.join(self.tmpdirname,VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file,"w",encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
def lowerCamelCase_ ( self : int,**__A : List[str] ):
return FunnelTokenizer.from_pretrained(self.tmpdirname,**lowerCamelCase_ )
def lowerCamelCase_ ( self : Any,**__A : List[Any] ):
return FunnelTokenizerFast.from_pretrained(self.tmpdirname,**lowerCamelCase_ )
def lowerCamelCase_ ( self : Any,__A : Union[str, Any] ):
_lowerCamelCase : List[str] = "UNwant\u00E9d,running"
_lowerCamelCase : int = "unwanted, running"
return input_text, output_text
def lowerCamelCase_ ( self : Union[str, Any] ):
_lowerCamelCase : Union[str, Any] = self.tokenizer_class(self.vocab_file )
_lowerCamelCase : int = tokenizer.tokenize("UNwant\u00E9d,running" )
self.assertListEqual(lowerCamelCase_,["un", "##want", "##ed", ",", "runn", "##ing"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCamelCase_ ),[7, 4, 5, 1_0, 8, 9] )
def lowerCamelCase_ ( self : Optional[Any] ):
_lowerCamelCase : Dict = self.get_tokenizers(do_lower_case=lowerCamelCase_ )
for tokenizer in tokenizers:
_lowerCamelCase : Any = tokenizer("UNwant\u00E9d,running" )
_lowerCamelCase : Tuple = len(inputs["input_ids"] ) - 1
self.assertListEqual(inputs["token_type_ids"],[2] + [0] * sentence_len )
_lowerCamelCase : Optional[int] = tokenizer("UNwant\u00E9d,running","UNwant\u00E9d,running" )
            self.assertListEqual(inputs["token_type_ids"],[2] + [0] * sentence_len + [1] * sentence_len )
| 44 |
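# --- Editor's sketch (added): what the last test checks -- Funnel assigns
# token type 2 to the leading <cls> slot instead of BERT's 0. Built on a
# throwaway vocab file, mirroring setUp above.
import os
import tempfile

from transformers import FunnelTokenizer

vocab = ["<unk>", "<cls>", "<sep>", "un", "##want", "##ed", "runn", "##ing"]
vocab_file = os.path.join(tempfile.mkdtemp(), "vocab.txt")
with open(vocab_file, "w", encoding="utf-8") as f:
    f.write("".join(t + "\n" for t in vocab))
tok = FunnelTokenizer(vocab_file)
print(tok("unwanted running")["token_type_ids"])  # leading 2 marks <cls>, zeros elsewhere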
"""simple docstring"""
from scipy.stats import pearsonr
import datasets
SCREAMING_SNAKE_CASE_ = '\nPearson correlation coefficient and p-value for testing non-correlation.\nThe Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.\nThe p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.\n'
SCREAMING_SNAKE_CASE_ = '\nArgs:\n predictions (`list` of `int`): Predicted class labels, as returned by a model.\n references (`list` of `int`): Ground truth labels.\n return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.\n\nReturns:\n pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.\n p-value (`float`): P-value, which roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.\n\nExamples:\n\n Example 1-A simple example using only predictions and references.\n >>> pearsonr_metric = datasets.load_metric("pearsonr")\n >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])\n >>> print(round(results[\'pearsonr\'], 2))\n -0.74\n\n Example 2-The same as Example 1, but it also returns the `p-value`.\n >>> pearsonr_metric = datasets.load_metric("pearsonr")\n >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)\n >>> print(sorted(list(results.keys())))\n [\'p-value\', \'pearsonr\']\n >>> print(round(results[\'pearsonr\'], 2))\n -0.74\n >>> print(round(results[\'p-value\'], 2))\n 0.15\n'
SCREAMING_SNAKE_CASE_ = '\n@article{2020SciPy-NMeth,\nauthor = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n Kern, Robert and Larson, Eric and Carey, C J and\n Polat, Ilhan and Feng, Yu and Moore, Eric W. and\n {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n Harris, Charles R. and Archibald, Anne M. and\n Ribeiro, Antonio H. and Pedregosa, Fabian and\n {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\ntitle = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n Computing in Python}},\njournal = {Nature Methods},\nyear = {2020},\nvolume = {17},\npages = {261--272},\nadsurl = {https://rdcu.be/b08Wh},\ndoi = {10.1038/s41592-019-0686-2},\n}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class snake_case_ ( datasets.Metric ):
"""simple docstring"""
def UpperCAmelCase__ ( self) -> Tuple:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''float'''),
'''references''': datasets.Value('''float'''),
}) , reference_urls=['''https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html'''] , )
    def _compute(self, predictions, references, return_pvalue=False):
        if return_pvalue:
            results = pearsonr(references, predictions)
            return {"pearsonr": results[0], "p-value": results[1]}
        else:
            return {"pearsonr": float(pearsonr(references, predictions)[0])}
| 34 | 0 |
import numpy as np
import skfuzzy as fuzz
if __name__ == "__main__":
# Create universe of discourse in Python using linspace ()
a_ : List[str] = np.linspace(start=0, stop=7_5, num=7_5, endpoint=True, retstep=False)
# Create two fuzzy sets by defining any membership function
# (trapmf(), gbellmf(), gaussmf(), etc).
a_ : List[Any] = [0, 2_5, 5_0]
a_ : Any = [2_5, 5_0, 7_5]
a_ : str = fuzz.membership.trimf(X, abca)
a_ : int = fuzz.membership.trimf(X, abca)
# Compute the different operations using inbuilt functions.
a_ : List[str] = np.ones(7_5)
a_ : List[str] = np.zeros((7_5,))
# 1. Union = max(µA(x), µB(x))
a_ : Optional[Any] = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
# 2. Intersection = min(µA(x), µB(x))
a_ : Optional[Any] = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
# 3. Complement (A) = (1- min(µA(x))
a_ : int = fuzz.fuzzy_not(young)
# 4. Difference (A/B) = min(µA(x),(1- µB(x)))
a_ : Tuple = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1]
# 5. Algebraic Sum = [µA(x) + µB(x) – (µA(x) * µB(x))]
a_ : int = young + middle_aged - (young * middle_aged)
# 6. Algebraic Product = (µA(x) * µB(x))
a_ : Optional[int] = young * middle_aged
# 7. Bounded Sum = min[1,(µA(x), µB(x))]
a_ : Optional[int] = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
# 8. Bounded difference = min[0,(µA(x), µB(x))]
a_ : Union[str, Any] = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]
# max-min composition
# max-product composition
# Plot each set A, set B and each operation result using plot() and subplot().
from matplotlib import pyplot as plt
plt.figure()
plt.subplot(4, 3, 1)
plt.plot(X, young)
plt.title('Young')
plt.grid(True)
plt.subplot(4, 3, 2)
plt.plot(X, middle_aged)
plt.title('Middle aged')
plt.grid(True)
plt.subplot(4, 3, 3)
plt.plot(X, union)
plt.title('union')
plt.grid(True)
plt.subplot(4, 3, 4)
plt.plot(X, intersection)
plt.title('intersection')
plt.grid(True)
plt.subplot(4, 3, 5)
plt.plot(X, complement_a)
plt.title('complement_a')
plt.grid(True)
plt.subplot(4, 3, 6)
plt.plot(X, difference)
plt.title('difference a/b')
plt.grid(True)
plt.subplot(4, 3, 7)
plt.plot(X, alg_sum)
plt.title('alg_sum')
plt.grid(True)
plt.subplot(4, 3, 8)
plt.plot(X, alg_product)
plt.title('alg_product')
plt.grid(True)
plt.subplot(4, 3, 9)
plt.plot(X, bdd_sum)
plt.title('bdd_sum')
plt.grid(True)
plt.subplot(4, 3, 1_0)
plt.plot(X, bdd_difference)
plt.title('bdd_difference')
plt.grid(True)
plt.subplots_adjust(hspace=0.5)
plt.show()
| 623 |
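# --- Editor's sketch (added): on aligned universes of discourse, fuzzy union
# and intersection reduce to element-wise max/min, which is all that
# fuzz.fuzzy_or / fuzz.fuzzy_and compute above.
import numpy as np

mu_a = np.array([0.0, 0.5, 1.0, 0.5, 0.0])
mu_b = np.array([0.0, 0.0, 0.5, 1.0, 0.5])
print(np.maximum(mu_a, mu_b))  # union:        max(muA(x), muB(x))
print(np.minimum(mu_a, mu_b))  # intersection: min(muA(x), muB(x))
print(1.0 - mu_a)              # complement:   1 - muA(x)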
"""simple docstring"""
import unittest
from dataclasses import dataclass
import pytest
from accelerate.commands.config.config_args import SageMakerConfig
from accelerate.utils import ComputeEnvironment
from accelerate.utils.launch import _convert_nargs_to_dict
@dataclass
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
A_ = ComputeEnvironment.AMAZON_SAGEMAKER
A_ = True
A_ = '''ml.p3.2xlarge'''
A_ = '''accelerate_sagemaker_execution_role'''
A_ = '''hf-sm'''
A_ = '''us-east-1'''
A_ = 1
A_ = '''accelerate-sagemaker-1'''
A_ = '''1.6'''
A_ = '''4.4'''
A_ = '''train.py'''
A_ = [
'''--model_name_or_path''',
'''bert''',
'''--do_train''',
'''False''',
'''--epochs''',
'''3''',
'''--learning_rate''',
'''5e-5''',
'''--max_steps''',
'''50.5''',
]
A_ = [
'''--model_name_or_path''',
'''bert''',
'''--do_train''',
'''--do_test''',
'''False''',
'''--do_predict''',
'''--epochs''',
'''3''',
'''--learning_rate''',
'''5e-5''',
'''--max_steps''',
'''50.5''',
]
class snake_case_ ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase__ ( self) -> List[Any]:
# If no defaults are changed, `to_kwargs` returns an empty dict.
UpperCamelCase = _convert_nargs_to_dict(MockLaunchConfig.success_training_script_args)
assert isinstance(converted_args['''model_name_or_path'''] , lowerCamelCase_)
assert isinstance(converted_args['''do_train'''] , lowerCamelCase_)
assert isinstance(converted_args['''epochs'''] , lowerCamelCase_)
assert isinstance(converted_args['''learning_rate'''] , lowerCamelCase_)
assert isinstance(converted_args['''max_steps'''] , lowerCamelCase_)
with pytest.raises(lowerCamelCase_):
            _convert_nargs_to_dict(MockLaunchConfig.fail_training_script_args)
| 34 | 0 |
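# --- Editor's sketch (added): the type coercion the assertions above rely on,
# left as a comment because the exact output is inferred from the test, not
# verified against accelerate:
#   _convert_nargs_to_dict(["--model_name_or_path", "bert", "--do_train", "False",
#                           "--epochs", "3", "--learning_rate", "5e-5",
#                           "--max_steps", "50.5"])
#   -> {"model_name_or_path": "bert", "do_train": False, "epochs": 3,
#       "learning_rate": 5e-05, "max_steps": 50.5}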
'''simple docstring'''
from argparse import ArgumentParser
from ..pipelines import Pipeline, PipelineDataFormat, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
lowerCamelCase :Optional[Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
def a ( lowerCamelCase__ ):
'''simple docstring'''
if not path:
return "pipe"
for ext in PipelineDataFormat.SUPPORTED_FORMATS:
if path.endswith(_lowercase ):
return ext
raise Exception(
f'Unable to determine file format from file extension {path}. '
f'Please provide the format through --format {PipelineDataFormat.SUPPORTED_FORMATS}' )
def a ( lowerCamelCase__ ):
'''simple docstring'''
A_ : int = pipeline(
task=args.task , model=args.model if args.model else None , config=args.config , tokenizer=args.tokenizer , device=args.device , )
A_ : Optional[int] = try_infer_format_from_ext(args.input ) if args.format == """infer""" else args.format
A_ : int = PipelineDataFormat.from_str(
format=_lowercase , output_path=args.output , input_path=args.input , column=args.column if args.column else nlp.default_input_names , overwrite=args.overwrite , )
return RunCommand(_lowercase , _lowercase )
class _lowerCAmelCase ( lowerCamelCase_ ):
def __init__(self , lowercase , lowercase ):
A_ : List[Any] = nlp
A_ : Dict = reader
@staticmethod
def _a (lowercase ):
A_ : int = parser.add_parser("""run""" , help="""Run a pipeline through the CLI""" )
run_parser.add_argument("""--task""" , choices=get_supported_tasks() , help="""Task to run""" )
run_parser.add_argument("""--input""" , type=lowerCamelCase_ , help="""Path to the file to use for inference""" )
run_parser.add_argument("""--output""" , type=lowerCamelCase_ , help="""Path to the file that will be used post to write results.""" )
run_parser.add_argument("""--model""" , type=lowerCamelCase_ , help="""Name or path to the model to instantiate.""" )
run_parser.add_argument("""--config""" , type=lowerCamelCase_ , help="""Name or path to the model\'s config to instantiate.""" )
run_parser.add_argument(
"""--tokenizer""" , type=lowerCamelCase_ , help="""Name of the tokenizer to use. (default: same as the model name)""" )
run_parser.add_argument(
"""--column""" , type=lowerCamelCase_ , help="""Name of the column to use as input. (For multi columns input as QA use column1,columns2)""" , )
run_parser.add_argument(
"""--format""" , type=lowerCamelCase_ , default="""infer""" , choices=PipelineDataFormat.SUPPORTED_FORMATS , help="""Input format to read from""" , )
run_parser.add_argument(
"""--device""" , type=lowerCamelCase_ , default=-1 , help="""Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)""" , )
run_parser.add_argument("""--overwrite""" , action="""store_true""" , help="""Allow overwriting the output file.""" )
run_parser.set_defaults(func=lowerCamelCase_ )
def _a (self ):
A_, A_ : Optional[Any] = self._nlp, []
for entry in self._reader:
A_ : Tuple = nlp(**lowerCamelCase_ ) if self._reader.is_multi_columns else nlp(lowerCamelCase_ )
if isinstance(lowerCamelCase_ , lowerCamelCase_ ):
outputs.append(lowerCamelCase_ )
else:
outputs += output
# Saving data
if self._nlp.binary_output:
A_ : int = self._reader.save_binary(lowerCamelCase_ )
logger.warning(F'Current pipeline requires output to be in binary format, saving at {binary_path}' )
else:
            self._reader.save(lowerCamelCase_ )
| 667 |
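# --- Editor's sketch (added): a shell invocation exercising the command
# registered above; the file names are placeholders.
#
#   transformers-cli run --task text-classification \
#       --input reviews.csv --column sentence --format csv \
#       --output predictions.json --device -1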
"""simple docstring"""
from typing import List
import jiwer
import jiwer.transforms as tr
from packaging import version
import datasets
from datasets.config import PY_VERSION
if PY_VERSION < version.parse('3.8'):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
SCREAMING_SNAKE_CASE_ = ''
if version.parse(importlib_metadata.version('jiwer')) < version.parse('2.3.0'):
class snake_case_ ( tr.AbstractTransform ):
"""simple docstring"""
def __init__( self , lowerCamelCase_ = " ") -> List[str]:
UpperCamelCase = sentence_delimiter
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Tuple:
return list(lowerCamelCase_)
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Optional[Any]:
UpperCamelCase = []
for sent_idx, sentence in enumerate(lowerCamelCase_):
chars.extend(self.process_string(lowerCamelCase_))
if self.sentence_delimiter is not None and self.sentence_delimiter != "" and sent_idx < len(lowerCamelCase_) - 1:
chars.append(self.sentence_delimiter)
return chars
SCREAMING_SNAKE_CASE_ = tr.Compose(
[tr.RemoveMultipleSpaces(), tr.Strip(), SentencesToListOfCharacters(SENTENCE_DELIMITER)]
)
else:
SCREAMING_SNAKE_CASE_ = tr.Compose(
[
tr.RemoveMultipleSpaces(),
tr.Strip(),
tr.ReduceToSingleSentence(SENTENCE_DELIMITER),
tr.ReduceToListOfListOfChars(),
]
)
SCREAMING_SNAKE_CASE_ = '\\n@inproceedings{inproceedings,\n author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n year = {2004},\n month = {01},\n pages = {},\n title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n'
SCREAMING_SNAKE_CASE_ = '\\nCharacter error rate (CER) is a common metric of the performance of an automatic speech recognition system.\n\nCER is similar to Word Error Rate (WER), but operates on character instead of word. Please refer to docs of WER for further information.\n\nCharacter error rate can be computed as:\n\nCER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct characters,\nN is the number of characters in the reference (N=S+D+C).\n\nCER\'s output is not always a number between 0 and 1, in particular when there is a high number of insertions. This value is often associated to the percentage of characters that were incorrectly predicted. The lower the value, the better the\nperformance of the ASR system with a CER of 0 being a perfect score.\n'
SCREAMING_SNAKE_CASE_ = '\nComputes CER score of transcribed segments against references.\nArgs:\n references: list of references for each speech input.\n predictions: list of transcribtions to score.\n concatenate_texts: Whether or not to concatenate sentences before evaluation, set to True for more accurate result.\nReturns:\n (float): the character error rate\n\nExamples:\n\n >>> predictions = ["this is the prediction", "there is an other sample"]\n >>> references = ["this is the reference", "there is another one"]\n >>> cer = datasets.load_metric("cer")\n >>> cer_score = cer.compute(predictions=predictions, references=references)\n >>> print(cer_score)\n 0.34146341463414637\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class snake_case_ ( datasets.Metric ):
"""simple docstring"""
def UpperCAmelCase__ ( self) -> Dict:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence'''),
'''references''': datasets.Value('''string''' , id='''sequence'''),
}) , codebase_urls=['''https://github.com/jitsi/jiwer/'''] , reference_urls=[
'''https://en.wikipedia.org/wiki/Word_error_rate''',
'''https://sites.google.com/site/textdigitisation/qualitymeasures/computingerrorrates''',
] , )
    def _compute(self, predictions, references, concatenate_texts=False):
        if concatenate_texts:
            return jiwer.compute_measures(
                references, predictions, truth_transform=cer_transform, hypothesis_transform=cer_transform,
            )["wer"]
        incorrect = 0
        total = 0
        for prediction, reference in zip(predictions, references):
            measures = jiwer.compute_measures(
                reference, prediction, truth_transform=cer_transform, hypothesis_transform=cer_transform,
            )
            incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
            total += measures["substitutions"] + measures["deletions"] + measures["hits"]
        return incorrect / total
| 34 | 0 |
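# --- Editor's sketch (added): the docstring example above, runnable as-is
# with the datasets library.
import datasets

cer = datasets.load_metric("cer")
score = cer.compute(
    predictions=["this is the prediction", "there is an other sample"],
    references=["this is the reference", "there is another one"],
)
print(score)  # 0.34146341463414637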
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase = logging.get_logger(__name__)
class _UpperCamelCase ( lowerCamelCase_ ):
'''simple docstring'''
lowerCAmelCase__ = """timm_backbone"""
def __init__( self : List[Any] , _lowerCAmelCase : List[str]=None , _lowerCAmelCase : Tuple=3 , _lowerCAmelCase : Optional[int]=True , _lowerCAmelCase : List[Any]=True , _lowerCAmelCase : Dict=None , **_lowerCAmelCase : Optional[Any] , ):
'''simple docstring'''
super().__init__(**lowerCamelCase_)
__lowercase =backbone
__lowercase =num_channels
__lowercase =features_only
__lowercase =use_pretrained_backbone
__lowercase =True
__lowercase =out_indices if out_indices is not None else (-1,)
| 474 |
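# --- Editor's sketch (added): constructing the config above directly. The
# backbone id is a typical timm name, used here as an assumption; import path
# assumes the class is exported at the transformers top level.
from transformers import TimmBackboneConfig

config = TimmBackboneConfig(backbone="resnet50", out_indices=(1, 2, 3, 4))
print(config.backbone, config.num_channels, config.out_indices)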
"""simple docstring"""
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ = {'vocab_file': 'spiece.model'}
SCREAMING_SNAKE_CASE_ = {
'vocab_file': {
'xlnet-base-cased': 'https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model',
'xlnet-large-cased': 'https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model',
}
}
SCREAMING_SNAKE_CASE_ = {
'xlnet-base-cased': None,
'xlnet-large-cased': None,
}
# Segments (not really needed)
SCREAMING_SNAKE_CASE_ = 0
SCREAMING_SNAKE_CASE_ = 1
SCREAMING_SNAKE_CASE_ = 2
SCREAMING_SNAKE_CASE_ = 3
SCREAMING_SNAKE_CASE_ = 4
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
A_ = VOCAB_FILES_NAMES
A_ = PRETRAINED_VOCAB_FILES_MAP
A_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A_ = '''left'''
def __init__( self , lowerCamelCase_ , lowerCamelCase_=False , lowerCamelCase_=True , lowerCamelCase_=False , lowerCamelCase_="<s>" , lowerCamelCase_="</s>" , lowerCamelCase_="<unk>" , lowerCamelCase_="<sep>" , lowerCamelCase_="<pad>" , lowerCamelCase_="<cls>" , lowerCamelCase_="<mask>" , lowerCamelCase_=["<eop>", "<eod>"] , lowerCamelCase_ = None , **lowerCamelCase_ , ) -> None:
# Mask token behave like a normal word, i.e. include the space before it
UpperCamelCase = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_) if isinstance(lowerCamelCase_ , lowerCamelCase_) else mask_token
UpperCamelCase = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=lowerCamelCase_ , remove_space=lowerCamelCase_ , keep_accents=lowerCamelCase_ , bos_token=lowerCamelCase_ , eos_token=lowerCamelCase_ , unk_token=lowerCamelCase_ , sep_token=lowerCamelCase_ , pad_token=lowerCamelCase_ , cls_token=lowerCamelCase_ , mask_token=lowerCamelCase_ , additional_special_tokens=lowerCamelCase_ , sp_model_kwargs=self.sp_model_kwargs , **lowerCamelCase_ , )
UpperCamelCase = 3
UpperCamelCase = do_lower_case
UpperCamelCase = remove_space
UpperCamelCase = keep_accents
UpperCamelCase = vocab_file
UpperCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(lowerCamelCase_)
@property
def UpperCAmelCase__ ( self) -> List[str]:
return len(self.sp_model)
def UpperCAmelCase__ ( self) -> Tuple:
UpperCamelCase = {self.convert_ids_to_tokens(lowerCamelCase_): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder)
return vocab
def __getstate__( self) -> Any:
UpperCamelCase = self.__dict__.copy()
UpperCamelCase = None
return state
def __setstate__( self , lowerCamelCase_) -> str:
UpperCamelCase = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs'''):
UpperCamelCase = {}
UpperCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(self.vocab_file)
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Union[str, Any]:
if self.remove_space:
UpperCamelCase = ''' '''.join(inputs.strip().split())
else:
UpperCamelCase = inputs
UpperCamelCase = outputs.replace('''``''' , '''"''').replace('''\'\'''' , '''"''')
if not self.keep_accents:
UpperCamelCase = unicodedata.normalize('''NFKD''' , lowerCamelCase_)
UpperCamelCase = ''''''.join([c for c in outputs if not unicodedata.combining(lowerCamelCase_)])
if self.do_lower_case:
UpperCamelCase = outputs.lower()
return outputs
def UpperCAmelCase__ ( self , lowerCamelCase_) -> List[str]:
UpperCamelCase = self.preprocess_text(lowerCamelCase_)
UpperCamelCase = self.sp_model.encode(lowerCamelCase_ , out_type=lowerCamelCase_)
UpperCamelCase = []
for piece in pieces:
if len(lowerCamelCase_) > 1 and piece[-1] == str(''',''') and piece[-2].isdigit():
UpperCamelCase = self.sp_model.EncodeAsPieces(piece[:-1].replace(lowerCamelCase_ , ''''''))
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0]) == 1:
UpperCamelCase = cur_pieces[1:]
else:
UpperCamelCase = cur_pieces[0][1:]
cur_pieces.append(piece[-1])
new_pieces.extend(lowerCamelCase_)
else:
new_pieces.append(lowerCamelCase_)
return new_pieces
def UpperCAmelCase__ ( self , lowerCamelCase_) -> int:
return self.sp_model.PieceToId(lowerCamelCase_)
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Optional[int]:
return self.sp_model.IdToPiece(lowerCamelCase_)
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Dict:
UpperCamelCase = ''''''.join(lowerCamelCase_).replace(lowerCamelCase_ , ''' ''').strip()
return out_string
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ = False , lowerCamelCase_ = None , lowerCamelCase_ = True , **lowerCamelCase_ , ) -> str:
UpperCamelCase = kwargs.pop('''use_source_tokenizer''' , lowerCamelCase_)
UpperCamelCase = self.convert_ids_to_tokens(lowerCamelCase_ , skip_special_tokens=lowerCamelCase_)
# To avoid mixing byte-level and unicode for byte-level BPT
# we need to build string separately for added tokens and byte-level tokens
# cf. https://github.com/huggingface/transformers/issues/1133
UpperCamelCase = []
UpperCamelCase = []
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_ids:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(lowerCamelCase_))
UpperCamelCase = []
sub_texts.append(lowerCamelCase_)
else:
current_sub_text.append(lowerCamelCase_)
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(lowerCamelCase_))
# Mimic the behavior of the Rust tokenizer:
# By default, there are no spaces between special tokens
UpperCamelCase = ''''''.join(lowerCamelCase_)
UpperCamelCase = (
clean_up_tokenization_spaces
if clean_up_tokenization_spaces is not None
else self.clean_up_tokenization_spaces
)
if clean_up_tokenization_spaces:
UpperCamelCase = self.clean_up_tokenization(lowerCamelCase_)
return clean_text
else:
return text
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ = None) -> List[int]:
UpperCamelCase = [self.sep_token_id]
UpperCamelCase = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ = None , lowerCamelCase_ = False) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCamelCase_ , token_ids_a=lowerCamelCase_ , already_has_special_tokens=lowerCamelCase_)
if token_ids_a is not None:
return ([0] * len(lowerCamelCase_)) + [1] + ([0] * len(lowerCamelCase_)) + [1, 1]
return ([0] * len(lowerCamelCase_)) + [1, 1]
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ = None) -> List[int]:
UpperCamelCase = [self.sep_token_id]
UpperCamelCase = [2]
if token_ids_a is None:
return len(token_ids_a + sep) * [0] + cls_segment_id
return len(token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1] + cls_segment_id
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ = None) -> Tuple[str]:
if not os.path.isdir(lowerCamelCase_):
logger.error(F'Vocabulary path ({save_directory}) should be a directory')
return
UpperCamelCase = os.path.join(
lowerCamelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''])
if os.path.abspath(self.vocab_file) != os.path.abspath(lowerCamelCase_) and os.path.isfile(self.vocab_file):
copyfile(self.vocab_file , lowerCamelCase_)
elif not os.path.isfile(self.vocab_file):
with open(lowerCamelCase_ , '''wb''') as fi:
UpperCamelCase = self.sp_model.serialized_model_proto()
fi.write(lowerCamelCase_)
        return (out_vocab_file,)
| 34 | 0 |
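# --- Editor's sketch (added): the trailing-special-token convention encoded
# by build_inputs_with_special_tokens above, shown on the public checkpoint
# named in this file.
from transformers import XLNetTokenizer

tok = XLNetTokenizer.from_pretrained("xlnet-base-cased")
ids = tok("Hello world")["input_ids"]
print(tok.convert_ids_to_tokens(ids))  # [..., '<sep>', '<cls>'] -- <cls> comes last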
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class SCREAMING_SNAKE_CASE__ :
'''simple docstring'''
def __init__( self, lowerCamelCase__, lowerCamelCase__=13, lowerCamelCase__=7, lowerCamelCase__=True, lowerCamelCase__=True, lowerCamelCase__=True, lowerCamelCase__=True, lowerCamelCase__=99, lowerCamelCase__=24, lowerCamelCase__=2, lowerCamelCase__=6, lowerCamelCase__=37, lowerCamelCase__="gelu", lowerCamelCase__=0.1, lowerCamelCase__=0.1, lowerCamelCase__=512, lowerCamelCase__=16, lowerCamelCase__=2, lowerCamelCase__=0.02, lowerCamelCase__=3, lowerCamelCase__=None, lowerCamelCase__=1000, ):
A : int = parent
A : Union[str, Any] = batch_size
A : Optional[Any] = seq_length
A : Union[str, Any] = is_training
A : List[Any] = use_input_mask
A : Tuple = use_token_type_ids
A : Union[str, Any] = use_labels
A : int = vocab_size
A : Any = hidden_size
A : int = num_hidden_layers
A : List[Any] = num_attention_heads
A : Union[str, Any] = intermediate_size
A : List[Any] = hidden_act
A : int = hidden_dropout_prob
A : List[str] = attention_probs_dropout_prob
A : List[str] = max_position_embeddings
A : Tuple = type_vocab_size
A : List[Any] = type_sequence_label_size
A : Optional[int] = initializer_range
A : List[Any] = num_labels
A : Optional[Any] = scope
A : int = range_bbox
def _lowerCAmelCase ( self ):
A : Tuple = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
A : List[str] = ids_tensor([self.batch_size, self.seq_length, 4], self.range_bbox )
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
A : Tuple = bbox[i, j, 3]
A : str = bbox[i, j, 1]
A : Union[str, Any] = t
if bbox[i, j, 2] < bbox[i, j, 0]:
A : List[Any] = bbox[i, j, 2]
A : Union[str, Any] = bbox[i, j, 0]
A : int = t
A : Dict = None
if self.use_input_mask:
A : str = ids_tensor([self.batch_size, self.seq_length], vocab_size=2 )
A : int = None
if self.use_token_type_ids:
A : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size )
A : Union[str, Any] = None
A : Optional[Any] = None
if self.use_labels:
A : Dict = ids_tensor([self.batch_size], self.type_sequence_label_size )
A : Dict = ids_tensor([self.batch_size, self.seq_length], self.num_labels )
A : Any = self.get_config()
return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
def _lowerCAmelCase ( self ):
return LiltConfig(
vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, )
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, ):
A : Optional[Any] = LiltModel(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
A : Union[str, Any] = model(lowerCamelCase_, bbox=lowerCamelCase_, attention_mask=lowerCamelCase_, token_type_ids=lowerCamelCase_ )
A : List[str] = model(lowerCamelCase_, bbox=lowerCamelCase_, token_type_ids=lowerCamelCase_ )
A : Dict = model(lowerCamelCase_, bbox=lowerCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size) )
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, ):
A : Dict = self.num_labels
A : List[str] = LiltForTokenClassification(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
A : Optional[int] = model(
lowerCamelCase_, bbox=lowerCamelCase_, attention_mask=lowerCamelCase_, token_type_ids=lowerCamelCase_, labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels) )
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, ):
A : Tuple = LiltForQuestionAnswering(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
A : int = model(
lowerCamelCase_, bbox=lowerCamelCase_, attention_mask=lowerCamelCase_, token_type_ids=lowerCamelCase_, start_positions=lowerCamelCase_, end_positions=lowerCamelCase_, )
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length) )
def _lowerCAmelCase ( self ):
A : Any = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
"""input_ids""": input_ids,
"""bbox""": bbox,
"""token_type_ids""": token_type_ids,
"""attention_mask""": input_mask,
}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE__ ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , unittest.TestCase ):
'''simple docstring'''
__lowerCamelCase : List[Any] = (
(
LiltModel,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltForQuestionAnswering,
)
if is_torch_available()
else ()
)
__lowerCamelCase : int = (
{
"feature-extraction": LiltModel,
"question-answering": LiltForQuestionAnswering,
"text-classification": LiltForSequenceClassification,
"token-classification": LiltForTokenClassification,
"zero-shot": LiltForSequenceClassification,
}
if is_torch_available()
else {}
)
__lowerCamelCase : List[str] = False
__lowerCamelCase : int = False
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name ):
        return True
def _lowerCAmelCase ( self ):
        self.model_tester = LiltModelTester(self )
        self.config_tester = ConfigTester(self, config_class=LiltConfig, hidden_size=37 )
def _lowerCAmelCase ( self ):
self.config_tester.run_common_tests()
def _lowerCAmelCase ( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def _lowerCAmelCase ( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs )
    def _lowerCAmelCase ( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs )
    def _lowerCAmelCase ( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs )
@slow
def _lowerCAmelCase ( self ):
        for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LiltModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@require_torch
@slow
class LiltModelIntegrationTest( unittest.TestCase ):
'''simple docstring'''
def _lowerCAmelCase ( self ):
        model = LiltModel.from_pretrained("""SCUT-DLVCLab/lilt-roberta-en-base""" ).to(torch_device )
        input_ids = torch.tensor([[1, 2]], device=torch_device )
        bbox = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]], device=torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(input_ids=input_ids, bbox=bbox )
        expected_shape = torch.Size([1, 2, 768] )
        expected_slice = torch.tensor(
            [[-0.0653, 0.0950, -0.0061], [-0.0545, 0.0926, -0.0324]], device=torch_device, )
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape )
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3], expected_slice, atol=1e-3 ) )
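# Illustrative sketch (not part of the original tests): LiLT pairs every token id with a
# (x0, y0, x1, y1) bounding box normalized to the 0-1000 range; the checkpoint matches the
# integration test above, while the zero boxes here are placeholders for real layout data.
#
#     from transformers import AutoTokenizer, LiltModel
#
#     tokenizer = AutoTokenizer.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base")
#     model = LiltModel.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base")
#     encoding = tokenizer("Hello world", return_tensors="pt")
#     bbox = torch.zeros(1, encoding.input_ids.shape[1], 4, dtype=torch.long)
#     outputs = model(input_ids=encoding.input_ids, bbox=bbox)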
| 662 |
"""simple docstring"""
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt'}
PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'openbmb/cpm-ant-10b': 'https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt',
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'openbmb/cpm-ant-10b': 1024,
}
def load_vocab ( vocab_file ):
    """simple docstring"""
    vocab = collections.OrderedDict()
    with open(vocab_file ,'''r''' ,encoding='''utf-8''' ) as reader:
        tokens = reader.readlines()
    for index, token in enumerate(tokens ):
        token = token.rstrip('''\n''' )
        vocab[token] = index
    return vocab
class WordpieceTokenizer ( object ):
"""simple docstring"""
    def __init__( self , vocab , unk_token="<unk>" , max_input_chars_per_word=2_0_0) -> Any:
        self.vocab = vocab
        self.unk_token = unk_token
        self.max_input_chars_per_word = max_input_chars_per_word
    def tokenize ( self , token) -> Union[str, Any]:
        chars = list(token)
        if len(chars) > self.max_input_chars_per_word:
            return [self.unk_token]
        start = 0
        sub_tokens = []
        while start < len(chars):
            end = len(chars)
            cur_substr = None
            while start < end:
                substr = ''.join(chars[start:end])
                if substr in self.vocab:
                    cur_substr = substr
                    break
                end -= 1
            if cur_substr is None:
                sub_tokens.append(self.unk_token)
                start += 1
            else:
                sub_tokens.append(cur_substr)
                start = end
        return sub_tokens
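# Illustrative walk-through (not part of the original file): the loop above is greedy
# longest-match-first. With a toy vocab, the longest prefix present in the vocab is taken
# first, the scan restarts at the remainder, and any position with no matching prefix
# emits unk_token:
#
#     tok = WordpieceTokenizer(vocab={"un", "happy"}, unk_token="<unk>")
#     tok.tokenize("unhappy")   # -> ["un", "happy"]
#     tok.tokenize("xunhappy")  # -> ["<unk>", "un", "happy"]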
class CpmAntTokenizer ( PreTrainedTokenizer ):
    """simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['''input_ids''', '''attention_mask''']
    add_prefix_space = False
    def __init__( self , vocab_file , bod_token="<d>" , eod_token="</d>" , bos_token="<s>" , eos_token="</s>" , pad_token="<pad>" , unk_token="<unk>" , line_token="</n>" , space_token="</_>" , padding_side="left" , **kwargs , ) -> List[str]:
        requires_backends(self , ['''jieba'''])
        super().__init__(
            bod_token=bod_token , eod_token=eod_token , bos_token=bos_token , eos_token=eos_token , pad_token=pad_token , unk_token=unk_token , line_token=line_token , space_token=space_token , padding_side=padding_side , **kwargs , )
        self.bod_token = bod_token
        self.eod_token = eod_token
        self.encoder = load_vocab(vocab_file)
        self.encoder[" "] = self.encoder[space_token]
        self.encoder["\n"] = self.encoder[line_token]
        del self.encoder[space_token]
        del self.encoder[line_token]
        self.encoder = collections.OrderedDict(sorted(self.encoder.items() , key=lambda x: x[1]))
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.encoder , unk_token=self.unk_token)
@property
def UpperCAmelCase__ ( self) -> Dict:
return self.encoder[self.bod_token]
@property
def UpperCAmelCase__ ( self) -> str:
return self.encoder[self.eod_token]
@property
def UpperCAmelCase__ ( self) -> List[Any]:
return self.encoder["\n"]
@property
def UpperCAmelCase__ ( self) -> int:
return len(self.encoder)
def UpperCAmelCase__ ( self) -> Dict:
return dict(self.encoder , **self.added_tokens_encoder)
    def _tokenize ( self , text) -> Any:
        output_tokens = []
        for x in jieba.cut(text , cut_all=False):
            output_tokens.extend(self.wordpiece_tokenizer.tokenize(x))
        return output_tokens
    def _decode ( self , token_ids , **kwargs) -> Tuple:
        token_ids = [i for i in token_ids if i >= 0]
        token_ids = [
            x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
        ]
        return super()._decode(token_ids , **kwargs)
    def check ( self , token) -> Dict:
        return token in self.encoder
def UpperCAmelCase__ ( self , lowerCamelCase_) -> str:
return "".join(lowerCamelCase_)
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Optional[int]:
return self.encoder.get(lowerCamelCase_ , self.encoder.get(self.unk_token))
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Dict:
return self.decoder.get(lowerCamelCase_ , self.unk_token)
    def save_vocabulary ( self , save_directory , filename_prefix = None) -> Tuple[str]:
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''])
        else:
            vocab_file = (filename_prefix + '''-''' if filename_prefix else '''''') + save_directory
        index = 0
        if " " in self.encoder:
            self.encoder["</_>"] = self.encoder[" "]
            del self.encoder[" "]
        if "\n" in self.encoder:
            self.encoder["</n>"] = self.encoder["\n"]
            del self.encoder["\n"]
        self.encoder = collections.OrderedDict(sorted(self.encoder.items() , key=lambda x: x[1]))
        with open(vocab_file , '''w''' , encoding='''utf-8''') as writer:
            for token, token_index in self.encoder.items():
                if index != token_index:
                    logger.warning(
                        F'Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.'
                        ''' Please check that the vocabulary is not corrupted!''')
                    index = token_index
                writer.write(token + '''\n''')
                index += 1
        return (vocab_file,)
    def build_inputs_with_special_tokens ( self , token_ids_0 , token_ids_1 = None) -> List[int]:
        if token_ids_1 is None:
            return [self.bos_token_id] + token_ids_0
        return [self.bos_token_id] + token_ids_0 + [self.bos_token_id] + token_ids_1
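    # Illustrative note (not part of the original file): together with
    # get_special_tokens_mask below, the layout is [bos] + ids for one sequence and
    # [bos] + ids_0 + [bos] + ids_1 for a pair, so every bos position is masked with 1:
    #
    #     # hypothetical ids, assuming the vocab maps the bos token to 6
    #     tokenizer.build_inputs_with_special_tokens([10, 11])        # -> [6, 10, 11]
    #     tokenizer.build_inputs_with_special_tokens([10, 11], [12])  # -> [6, 10, 11, 6, 12]
    #     tokenizer.get_special_tokens_mask([10, 11], [12])           # -> [1, 0, 0, 1, 0]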
    def get_special_tokens_mask ( self , token_ids_0 , token_ids_1 = None , already_has_special_tokens = False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=already_has_special_tokens)
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1))
        return [1] + ([0] * len(token_ids_0)) | 34 | 0 |
import logging
import os
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
from tqdm import auto as tqdm_lib
log_levels = {
    """debug""": logging.DEBUG,
    """info""": logging.INFO,
    """warning""": logging.WARNING,
    """error""": logging.ERROR,
    """critical""": logging.CRITICAL,
}
_default_log_level = logging.WARNING
def _get_default_logging_level ():
    env_level_str = os.getenv('DATASETS_VERBOSITY' , None)
    if env_level_str:
        if env_level_str in log_levels:
            return log_levels[env_level_str]
        else:
            logging.getLogger().warning(
                f"""Unknown option DATASETS_VERBOSITY={env_level_str}, """
                f"""has to be one of: { ", ".join(log_levels.keys()) }""")
    return _default_log_level
def _get_library_name ():
    return __name__.split('.')[0]
def _get_library_root_logger ():
    return logging.getLogger(_get_library_name())
def _configure_library_root_logger ():
    library_root_logger = _get_library_root_logger()
    library_root_logger.setLevel(_get_default_logging_level())
def _reset_library_root_logger ():
    library_root_logger = _get_library_root_logger()
    library_root_logger.setLevel(logging.NOTSET)
def get_logger (name = None):
    if name is None:
        name = _get_library_name()
    return logging.getLogger(name)
def get_verbosity ():
    return _get_library_root_logger().getEffectiveLevel()
def set_verbosity (verbosity):
    _get_library_root_logger().setLevel(verbosity)
def set_verbosity_info ():
    return set_verbosity(INFO)
def set_verbosity_warning ():
    return set_verbosity(WARNING)
def set_verbosity_debug ():
    return set_verbosity(DEBUG)
def set_verbosity_error ():
    return set_verbosity(ERROR)
def disable_propagation ():
    _get_library_root_logger().propagate = False
def enable_propagation ():
    _get_library_root_logger().propagate = True
# Configure the library root logger at the module level (singleton-like)
_configure_library_root_logger()
class EmptyTqdm :
"""simple docstring"""
    def __init__( self , *args , **kwargs ) -> int: # pylint: disable=unused-argument
        self._iterator = args[0] if args else None
def __iter__( self ) -> Optional[int]:
return iter(self._iterator )
    def __getattr__( self , attr ) -> Union[str, Any]:
        def empty_fn(*args , **kwargs ): # pylint: disable=unused-argument
            return
        return empty_fn
def __enter__( self ) -> Optional[Any]:
return self
    def __exit__( self , type_ , value , traceback ) -> str:
        return
_tqdm_active = True
class _tqdm_cls :
"""simple docstring"""
    def __call__( self , *args , disable=False , **kwargs ) -> Union[str, Any]:
        if _tqdm_active and not disable:
            return tqdm_lib.tqdm(*args , **kwargs )
        else:
            return EmptyTqdm(*args , **kwargs )
    def set_lock ( self , *args , **kwargs ) -> Tuple:
        self._lock = None
        if _tqdm_active:
            return tqdm_lib.tqdm.set_lock(*args , **kwargs )
    def get_lock ( self ) -> Tuple:
        if _tqdm_active:
            return tqdm_lib.tqdm.get_lock()
tqdm = _tqdm_cls()
def is_progress_bar_enabled ():
    global _tqdm_active
    return bool(_tqdm_active)
def enable_progress_bar ():
    global _tqdm_active
    _tqdm_active = True
def disable_progress_bar ():
    global _tqdm_active
    _tqdm_active = False
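# Illustrative usage (not part of the original file): the verbosity helpers drive the
# library root logger, and the tqdm wrapper degrades to a no-op EmptyTqdm once progress
# bars are disabled.
#
#     set_verbosity_info()              # same as set_verbosity(INFO)
#     logger = get_logger(__name__)
#     logger.info("visible at INFO and below")
#     disable_progress_bar()            # tqdm(...) now returns EmptyTqdm
#     for _ in tqdm(range(3)):
#         pass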
| 23 |
"""simple docstring"""
from typing import Callable, Dict, Optional, Tuple
import torch
from torch import nn
from torch.distributions import (
AffineTransform,
Distribution,
Independent,
NegativeBinomial,
Normal,
StudentT,
TransformedDistribution,
)
class AffineTransformed ( TransformedDistribution ):
    def __init__( self , base_distribution , loc=None , scale=None , event_dim=0) -> int:
        self.scale = 1.0 if scale is None else scale
        self.loc = 0.0 if loc is None else loc
        super().__init__(base_distribution , [AffineTransform(loc=self.loc , scale=self.scale , event_dim=event_dim)])
    @property
    def mean( self) -> List[Any]:
        return self.base_dist.mean * self.scale + self.loc
    @property
    def variance( self) -> List[str]:
        return self.base_dist.variance * self.scale**2
    @property
    def stddev( self) -> Any:
        return self.variance.sqrt()
class ParameterProjection ( nn.Module ):
    def __init__( self , in_features , args_dim , domain_map , **kwargs) -> None:
        super().__init__(**kwargs)
        self.args_dim = args_dim
        self.proj = nn.ModuleList([nn.Linear(in_features , dim) for dim in args_dim.values()])
        self.domain_map = domain_map
    def forward( self , x) -> Tuple[torch.Tensor]:
        params_unbounded = [proj(x) for proj in self.proj]
        return self.domain_map(*params_unbounded)
class LambdaLayer ( nn.Module ):
    def __init__( self , function) -> int:
        super().__init__()
        self.function = function
    def forward( self , x , *args) -> Tuple:
        return self.function(x , *args)
class DistributionOutput:
    distribution_class: type
    in_support: Distribution
    args_dim: Dict[str, int]
    def __init__( self , dim = 1) -> None:
        self.dim = dim
        self.args_dim = {k: dim * self.args_dim[k] for k in self.args_dim}
    def _base_distribution( self , distr_args) -> str:
        if self.dim == 1:
            return self.distribution_class(*distr_args)
        else:
            return Independent(self.distribution_class(*distr_args) , 1)
    def distribution( self , distr_args , loc = None , scale = None , ) -> Distribution:
        distr = self._base_distribution(distr_args)
        if loc is None and scale is None:
            return distr
        else:
            return AffineTransformed(distr , loc=loc , scale=scale , event_dim=self.event_dim)
    @property
    def event_shape( self) -> Tuple:
        return () if self.dim == 1 else (self.dim,)
    @property
    def event_dim( self) -> int:
        return len(self.event_shape)
    @property
    def value_in_support( self) -> float:
        return 0.0
    def get_parameter_projection( self , in_features) -> nn.Module:
        return ParameterProjection(
            in_features=in_features , args_dim=self.args_dim , domain_map=LambdaLayer(self.domain_map) , )
    def domain_map( self , *args) -> List[str]:
        raise NotImplementedError()
    @staticmethod
    def squareplus( x) -> torch.Tensor:
        return (x + torch.sqrt(torch.square(x) + 4.0)) / 2.0
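# Illustrative usage (not part of the original file) of the subclasses defined below:
# StudentTOutput projects raw network features into valid StudentT parameters; squareplus
# is a smooth positivity map (for example squareplus(0) == 1) that keeps df and scale
# positive.
#
#     output = StudentTOutput(dim=1)
#     projection = output.get_parameter_projection(in_features=8)
#     df, loc, scale = projection(torch.randn(4, 8))    # each shaped (4,)
#     distribution = output.distribution((df, loc, scale))
#     sample = distribution.sample()                    # one draw per batch element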
class StudentTOutput ( DistributionOutput ):
    args_dim: Dict[str, int] = {"df": 1, "loc": 1, "scale": 1}
    distribution_class: type = StudentT
    @classmethod
    def domain_map( cls , df , loc , scale) -> Optional[int]:
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        df = 2.0 + cls.squareplus(df)
        return df.squeeze(-1), loc.squeeze(-1), scale.squeeze(-1)
class NormalOutput ( DistributionOutput ):
    args_dim: Dict[str, int] = {"loc": 1, "scale": 1}
    distribution_class: type = Normal
    @classmethod
    def domain_map( cls , loc , scale) -> str:
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        return loc.squeeze(-1), scale.squeeze(-1)
class NegativeBinomialOutput ( DistributionOutput ):
    args_dim: Dict[str, int] = {"total_count": 1, "logits": 1}
    distribution_class: type = NegativeBinomial
    @classmethod
    def domain_map( cls , total_count , logits) -> List[Any]:
        total_count = cls.squareplus(total_count)
        return total_count.squeeze(-1), logits.squeeze(-1)
    def _base_distribution( self , distr_args) -> Distribution:
        total_count , logits = distr_args
        if self.dim == 1:
            return self.distribution_class(total_count=total_count , logits=logits)
        else:
            return Independent(self.distribution_class(total_count=total_count , logits=logits) , 1)
    def distribution( self , distr_args , loc = None , scale = None) -> Distribution:
        total_count , logits = distr_args
        if scale is not None:
            # See scaling property of Gamma.
            logits += scale.log()
        return self._base_distribution((total_count, logits)) | 34 | 0 |
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def embeddings( idx) -> Union[str, Any]:
'''simple docstring'''
__UpperCamelCase : str = []
embed.append(
(
F'cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight',
F'stage{idx}.patch_embed.proj.weight',
))
embed.append(
(
F'cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias',
F'stage{idx}.patch_embed.proj.bias',
))
embed.append(
(
F'cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight',
F'stage{idx}.patch_embed.norm.weight',
))
embed.append(
(
F'cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias',
F'stage{idx}.patch_embed.norm.bias',
))
return embed
def attention( idx , cnt) -> Optional[int]:
'''simple docstring'''
__UpperCamelCase : Dict = []
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight',
))
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight',
))
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias',
))
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean',
))
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var',
))
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked',
))
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight',
))
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight',
))
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias',
))
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean',
))
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var',
))
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked',
))
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight',
))
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight',
))
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias',
))
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean',
))
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var',
))
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked',
))
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight',
F'stage{idx}.blocks.{cnt}.attn.proj_q.weight',
))
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias',
F'stage{idx}.blocks.{cnt}.attn.proj_q.bias',
))
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight',
F'stage{idx}.blocks.{cnt}.attn.proj_k.weight',
))
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias',
F'stage{idx}.blocks.{cnt}.attn.proj_k.bias',
))
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight',
F'stage{idx}.blocks.{cnt}.attn.proj_v.weight',
))
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias',
F'stage{idx}.blocks.{cnt}.attn.proj_v.bias',
))
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight',
F'stage{idx}.blocks.{cnt}.attn.proj.weight',
))
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias',
F'stage{idx}.blocks.{cnt}.attn.proj.bias',
))
attention_weights.append(
(F'cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight', F'stage{idx}.blocks.{cnt}.mlp.fc1.weight'))
attention_weights.append(
(F'cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias', F'stage{idx}.blocks.{cnt}.mlp.fc1.bias'))
attention_weights.append(
(F'cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight', F'stage{idx}.blocks.{cnt}.mlp.fc2.weight'))
attention_weights.append(
(F'cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias', F'stage{idx}.blocks.{cnt}.mlp.fc2.bias'))
attention_weights.append(
(F'cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight', F'stage{idx}.blocks.{cnt}.norm1.weight'))
attention_weights.append(
(F'cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias', F'stage{idx}.blocks.{cnt}.norm1.bias'))
attention_weights.append(
(F'cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight', F'stage{idx}.blocks.{cnt}.norm2.weight'))
attention_weights.append(
(F'cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias', F'stage{idx}.blocks.{cnt}.norm2.bias'))
return attention_weights
def cls_token( idx) -> Union[str, Any]:
'''simple docstring'''
__UpperCamelCase : Optional[Any] = []
token.append((F'cvt.encoder.stages.{idx}.cls_token', "stage2.cls_token"))
return token
def final( ) -> Tuple:
'''simple docstring'''
__UpperCamelCase : int = []
head.append(("layernorm.weight", "norm.weight"))
head.append(("layernorm.bias", "norm.bias"))
head.append(("classifier.weight", "head.weight"))
head.append(("classifier.bias", "head.bias"))
return head
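# Illustrative note (not part of the original script): each helper above returns
# (huggingface_name, original_name) pairs, e.g. embeddings(0)[0] is
# ("cvt.encoder.stages.0.embedding.convolution_embeddings.projection.weight",
#  "stage0.patch_embed.proj.weight"); the conversion loop below uses the second
# name to look up the original tensor and the first as the converted key.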
def convert_cvt_checkpoint( cvt_model , image_size , cvt_file_name , pytorch_dump_folder_path) -> Union[str, Any]:
    '''simple docstring'''
    img_labels_file = "imagenet-1k-id2label.json"
    num_labels = 1_000
    repo_id = "huggingface/label-files"
    id2label = json.load(open(cached_download(hf_hub_url(repo_id , img_labels_file , repo_type="dataset")) , "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    config = CvtConfig(num_labels=num_labels , id2label=id2label , label2id=label2id)
    # For depth size 13 (13 = 1+2+10)
    if cvt_model.rsplit("/" , 1)[-1][4:6] == "13":
        config.depth = [1, 2, 10]
    # For depth size 21 (21 = 1+4+16)
    elif cvt_model.rsplit("/" , 1)[-1][4:6] == "21":
        config.depth = [1, 4, 16]
    # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 + 20)
    else:
        config.depth = [2, 2, 20]
        config.num_heads = [3, 12, 16]
        config.embed_dim = [192, 768, 1_024]
    model = CvtForImageClassification(config)
    image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k")
    image_processor.size["shortest_edge"] = image_size
    original_weights = torch.load(cvt_file_name , map_location=torch.device("cpu"))
    huggingface_weights = OrderedDict()
    list_of_state_dict = []
    for idx in range(len(config.depth)):
        if config.cls_token[idx]:
            list_of_state_dict = list_of_state_dict + cls_token(idx)
        list_of_state_dict = list_of_state_dict + embeddings(idx)
        for cnt in range(config.depth[idx]):
            list_of_state_dict = list_of_state_dict + attention(idx , cnt)
    list_of_state_dict = list_of_state_dict + final()
    for gg in list_of_state_dict:
        print(gg)
    for i in range(len(list_of_state_dict)):
        huggingface_weights[list_of_state_dict[i][0]] = original_weights[list_of_state_dict[i][1]]
    model.load_state_dict(huggingface_weights)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
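# Illustrative invocation (not part of the original script); the script filename and
# checkpoint path are placeholders for wherever the official weights were downloaded:
#
#     python convert_cvt_original_pytorch_checkpoint_to_pytorch.py \
#         --cvt_model cvt-w24 \
#         --image_size 384 \
#         --cvt_file_name cvtmodels/CvT-w24-384x384-IN-22k.pth \
#         --pytorch_dump_folder_path ./cvt-w24-converted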
if __name__ == "__main__":
lowercase : Optional[int] = argparse.ArgumentParser()
parser.add_argument(
'--cvt_model',
default='cvt-w24',
type=str,
help='Name of the cvt model you\'d like to convert.',
)
parser.add_argument(
'--image_size',
default=384,
type=int,
help='Input Image Size',
)
parser.add_argument(
'--cvt_file_name',
default=R'cvtmodels\CvT-w24-384x384-IN-22k.pth',
type=str,
help='Input Image Size',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
lowercase : Union[str, Any] = parser.parse_args()
convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path) | 557 |
"""simple docstring"""
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(__file__)), 'src'))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='ignore', category=FutureWarning)
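# Illustrative usage (not part of the original conftest): the hooks defined below pull in
# diffusers' shared pytest options, e.g. per-run report files via
#
#     python -m pytest tests/ --make-reports=my_test_run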
def pytest_addoption ( parser ):
    """simple docstring"""
    from diffusers.utils.testing_utils import pytest_addoption_shared
    pytest_addoption_shared(parser )
def pytest_terminal_summary ( terminalreporter ):
    """simple docstring"""
    from diffusers.utils.testing_utils import pytest_terminal_summary_main
    make_reports = terminalreporter.config.getoption('''--make-reports''' )
    if make_reports:
        pytest_terminal_summary_main(terminalreporter ,id=make_reports ) | 34 | 0 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
UpperCAmelCase_ : List[Any] = logging.get_logger(__name__)
UpperCAmelCase_ : Optional[int] = {
"microsoft/layoutlmv3-base": "https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json",
}
class LayoutLMv3Config ( PretrainedConfig ):
    model_type = """layoutlmv3"""
    def __init__( self , vocab_size=50_265 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3_072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-5 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , max_2d_position_embeddings=1_024 , coordinate_size=128 , shape_size=128 , has_relative_attention_bias=True , rel_pos_bins=32 , max_rel_pos=128 , rel_2d_pos_bins=64 , max_rel_2d_pos=256 , has_spatial_attention_bias=True , text_embed=True , visual_embed=True , input_size=224 , num_channels=3 , patch_size=16 , classifier_dropout=None , **kwargs , ):
        super().__init__(
            vocab_size=vocab_size , hidden_size=hidden_size , num_hidden_layers=num_hidden_layers , num_attention_heads=num_attention_heads , intermediate_size=intermediate_size , hidden_act=hidden_act , hidden_dropout_prob=hidden_dropout_prob , attention_probs_dropout_prob=attention_probs_dropout_prob , max_position_embeddings=max_position_embeddings , type_vocab_size=type_vocab_size , initializer_range=initializer_range , layer_norm_eps=layer_norm_eps , pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs , )
        self.max_2d_position_embeddings = max_2d_position_embeddings
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.has_relative_attention_bias = has_relative_attention_bias
        self.rel_pos_bins = rel_pos_bins
        self.max_rel_pos = max_rel_pos
        self.has_spatial_attention_bias = has_spatial_attention_bias
        self.rel_2d_pos_bins = rel_2d_pos_bins
        self.max_rel_2d_pos = max_rel_2d_pos
        self.text_embed = text_embed
        self.visual_embed = visual_embed
        self.input_size = input_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.classifier_dropout = classifier_dropout
class LayoutLMv3OnnxConfig ( OnnxConfig ):
    torch_onnx_minimum_version = version.parse("""1.12""" )
@property
    def inputs( self ):
# The order of inputs is different for question answering and sequence classification
if self.task in ["question-answering", "sequence-classification"]:
return OrderedDict(
[
("input_ids", {0: "batch", 1: "sequence"}),
("attention_mask", {0: "batch", 1: "sequence"}),
("bbox", {0: "batch", 1: "sequence"}),
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
else:
return OrderedDict(
[
("input_ids", {0: "batch", 1: "sequence"}),
("bbox", {0: "batch", 1: "sequence"}),
("attention_mask", {0: "batch", 1: "sequence"}),
("pixel_values", {0: "batch", 1: "num_channels"}),
] )
@property
    def atol_for_validation( self ):
return 1e-5
@property
    def default_onnx_opset( self ):
return 12
    def generate_dummy_inputs( self , processor , batch_size = -1 , seq_length = -1 , is_pair = False , framework = None , num_channels = 3 , image_width = 40 , image_height = 40 , ):
        setattr(processor.image_processor , "apply_ocr" , False )
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = processor.tokenizer.num_special_tokens_to_add(is_pair )
        seq_length = compute_effective_axis_dimension(
            seq_length , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=token_to_add )
        # Generate dummy inputs according to compute batch and sequence
        dummy_text = [[" ".join([processor.tokenizer.unk_token] ) * seq_length]] * batch_size
        # Generate dummy bounding boxes
        dummy_bboxes = [[[48, 84, 73, 128]]] * batch_size
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        # batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
        dummy_image = self._generate_dummy_images(batch_size , num_channels , image_height , image_width )
        inputs = dict(
            processor(
                dummy_image , text=dummy_text , boxes=dummy_bboxes , return_tensors=framework , ) )
        return inputs
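# Illustrative sketch (not part of the original file): generating dummy ONNX inputs goes
# through a LayoutLMv3 processor; the checkpoint name below is the standard one and is an
# assumption here, as is disabling OCR so raw words/boxes are accepted.
#
#     from transformers import AutoProcessor
#     from transformers.utils import TensorType
#
#     config = LayoutLMv3Config()
#     onnx_config = LayoutLMv3OnnxConfig(config, task="question-answering")
#     processor = AutoProcessor.from_pretrained("microsoft/layoutlmv3-base", apply_ocr=False)
#     dummy_inputs = onnx_config.generate_dummy_inputs(processor, framework=TensorType.PYTORCH)
#     # -> dict with input_ids, attention_mask, bbox and pixel_values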
| 491 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
def __init__( self , *lowerCamelCase_ , **lowerCamelCase_) -> None:
warnings.warn(
'''The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
''' Please use MobileViTImageProcessor instead.''' , lowerCamelCase_ , )
super().__init__(*lowerCamelCase_ , **lowerCamelCase_) | 34 | 0 |
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
ControlNetModel,
DDIMScheduler,
StableDiffusionControlNetImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel
from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
)
enable_full_determinism()
class ControlNetImg2ImgPipelineFastTests( PipelineLatentTesterMixin , PipelineKarrasSchedulerTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
    pipeline_class = StableDiffusionControlNetImgaImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'height', 'width'}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({'control_image'} )
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components ( self ):
        '''simple docstring'''
        torch.manual_seed(0 )
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
        torch.manual_seed(0 )
        controlnet = ControlNetModel(
            block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
        torch.manual_seed(0 )
        scheduler = DDIMScheduler(
            beta_start=0.00_085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=False , set_alpha_to_one=False , )
        torch.manual_seed(0 )
        vae = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
        torch.manual_seed(0 )
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
        text_encoder = CLIPTextModel(text_encoder_config )
        tokenizer = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
        components = {
            """unet""": unet,
            """controlnet""": controlnet,
            """scheduler""": scheduler,
            """vae""": vae,
            """text_encoder""": text_encoder,
            """tokenizer""": tokenizer,
            """safety_checker""": None,
            """feature_extractor""": None,
        }
        return components
    def get_dummy_inputs ( self , device , seed=0 ):
        '''simple docstring'''
        if str(device ).startswith("""mps""" ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        controlnet_embedder_scale_factor = 2
        control_image = randn_tensor(
            (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=generator , device=torch.device(device ) , )
        image = floats_tensor(control_image.shape , rng=random.Random(seed ) ).to(device )
        image = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        image = Image.fromarray(np.uint8(image ) ).convert("""RGB""" ).resize((64, 64) )
        inputs = {
            """prompt""": """A painting of a squirrel eating a burger""",
            """generator""": generator,
            """num_inference_steps""": 2,
            """guidance_scale""": 6.0,
            """output_type""": """numpy""",
            """image""": image,
            """control_image""": control_image,
        }
        return inputs
def _UpperCAmelCase ( self ):
'''simple docstring'''
return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3 )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def _UpperCAmelCase ( self ):
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3 )
def _UpperCAmelCase ( self ):
'''simple docstring'''
self._test_inference_batch_single_identical(expected_max_diff=2e-3 )
class StableDiffusionMultiControlNetPipelineFastTests( PipelineTesterMixin , PipelineKarrasSchedulerTesterMixin , unittest.TestCase ):
    """simple docstring"""
    pipeline_class = StableDiffusionControlNetImgaImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'height', 'width'}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset([] )  # TO_DO: add image_params once refactored VaeImageProcessor.preprocess
    def get_dummy_components ( self ):
        '''simple docstring'''
        torch.manual_seed(0 )
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
        torch.manual_seed(0 )
        def init_weights(m ):
            if isinstance(m , torch.nn.Conv2d ):
                torch.nn.init.normal(m.weight )
                m.bias.data.fill_(1.0 )
        controlnet1 = ControlNetModel(
            block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
        controlnet1.controlnet_down_blocks.apply(init_weights )
        torch.manual_seed(0 )
        controlnet2 = ControlNetModel(
            block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
        controlnet2.controlnet_down_blocks.apply(init_weights )
        torch.manual_seed(0 )
        scheduler = DDIMScheduler(
            beta_start=0.00_085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=False , set_alpha_to_one=False , )
        torch.manual_seed(0 )
        vae = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
        torch.manual_seed(0 )
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
        text_encoder = CLIPTextModel(text_encoder_config )
        tokenizer = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
        controlnet = MultiControlNetModel([controlnet1, controlnet2] )
        components = {
            """unet""": unet,
            """controlnet""": controlnet,
            """scheduler""": scheduler,
            """vae""": vae,
            """text_encoder""": text_encoder,
            """tokenizer""": tokenizer,
            """safety_checker""": None,
            """feature_extractor""": None,
        }
        return components
    def get_dummy_inputs ( self , device , seed=0 ):
        '''simple docstring'''
        if str(device ).startswith("""mps""" ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        controlnet_embedder_scale_factor = 2
        control_image = [
            randn_tensor(
                (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=generator , device=torch.device(device ) , ),
            randn_tensor(
                (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=generator , device=torch.device(device ) , ),
        ]
        image = floats_tensor(control_image[0].shape , rng=random.Random(seed ) ).to(device )
        image = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        image = Image.fromarray(np.uint8(image ) ).convert("""RGB""" ).resize((64, 64) )
        inputs = {
            """prompt""": """A painting of a squirrel eating a burger""",
            """generator""": generator,
            """num_inference_steps""": 2,
            """guidance_scale""": 6.0,
            """output_type""": """numpy""",
            """image""": image,
            """control_image""": control_image,
        }
        return inputs
def _UpperCAmelCase ( self ):
'''simple docstring'''
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe.to(torch_device )
        scale = 10.0
        steps = 4
        inputs = self.get_dummy_inputs(torch_device )
        inputs["num_inference_steps"] = steps
        inputs["controlnet_conditioning_scale"] = scale
        output_1 = pipe(**inputs )[0]
        inputs = self.get_dummy_inputs(torch_device )
        inputs["num_inference_steps"] = steps
        inputs["controlnet_conditioning_scale"] = scale
        output_2 = pipe(**inputs , control_guidance_start=0.1 , control_guidance_end=0.2 )[0]
        inputs = self.get_dummy_inputs(torch_device )
        inputs["num_inference_steps"] = steps
        inputs["controlnet_conditioning_scale"] = scale
        output_3 = pipe(**inputs , control_guidance_start=[0.1, 0.3] , control_guidance_end=[0.2, 0.7] )[0]
        inputs = self.get_dummy_inputs(torch_device )
        inputs["num_inference_steps"] = steps
        inputs["controlnet_conditioning_scale"] = scale
        output_4 = pipe(**inputs , control_guidance_start=0.4 , control_guidance_end=[0.5, 0.8] )[0]
        # make sure that all outputs are different
        assert np.sum(np.abs(output_1 - output_2 ) ) > 1e-3
        assert np.sum(np.abs(output_1 - output_3 ) ) > 1e-3
        assert np.sum(np.abs(output_1 - output_4 ) ) > 1e-3
def _UpperCAmelCase ( self ):
'''simple docstring'''
return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3 )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def _UpperCAmelCase ( self ):
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3 )
def _UpperCAmelCase ( self ):
'''simple docstring'''
self._test_inference_batch_single_identical(expected_max_diff=2e-3 )
def _UpperCAmelCase ( self ):
'''simple docstring'''
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        with tempfile.TemporaryDirectory() as tmpdir:
            try:
                # save_pretrained is not implemented for Multi-ControlNet
                pipe.save_pretrained(tmpdir )
            except NotImplementedError:
                pass
@slow
@require_torch_gpu
class ControlNetImg2ImgPipelineSlowTests( unittest.TestCase ):
"""simple docstring"""
def _UpperCAmelCase ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _UpperCAmelCase ( self ):
'''simple docstring'''
        controlnet = ControlNetModel.from_pretrained("""lllyasviel/sd-controlnet-canny""" )
        pipe = StableDiffusionControlNetImgaImgPipeline.from_pretrained(
            """runwayml/stable-diffusion-v1-5""" , safety_checker=None , controlnet=controlnet )
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None )
        generator = torch.Generator(device="""cpu""" ).manual_seed(0 )
        prompt = """evil space-punk bird"""
        control_image = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png""" ).resize((512, 512) )
        image = load_image(
            """https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png""" ).resize((512, 512) )
        output = pipe(
            prompt , image , control_image=control_image , generator=generator , output_type="""np""" , num_inference_steps=50 , strength=0.6 , )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        expected_image = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy""" )
        assert np.abs(expected_image - image ).max() < 9e-2
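# Illustrative note (not part of the original tests): control_guidance_start/end restrict
# the fraction of denoising steps over which each ControlNet conditions the UNet, e.g.
# control_guidance_start=0.1, control_guidance_end=0.2 applies control only between 10%
# and 20% of the schedule, and list values assign one window per ControlNet.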
| 398 |
"""simple docstring"""
def z_function ( input_str ):
    """simple docstring"""
    z_result = [0 for i in range(len(input_str ) )]
    # initialize interval's left pointer and right pointer
    left_pointer , right_pointer = 0, 0
    for i in range(1 ,len(input_str ) ):
        # case when current index is inside the interval
        if i <= right_pointer:
            min_edge = min(right_pointer - i + 1 ,z_result[i - left_pointer] )
            z_result[i] = min_edge
        while go_next(i ,z_result ,input_str ):
            z_result[i] += 1
        # if new index's result gives us more right interval,
        # we've to update left_pointer and right_pointer
        if i + z_result[i] - 1 > right_pointer:
            left_pointer , right_pointer = i, i + z_result[i] - 1
    return z_result
def go_next ( i ,z_result ,s ):
    """simple docstring"""
    return i + z_result[i] < len(s ) and s[z_result[i]] == s[i + z_result[i]]
def find_pattern ( pattern ,input_str ):
    """simple docstring"""
    answer = 0
    # concatenate 'pattern' and 'input_str' and call z_function
    # with concatenated string
    z_result = z_function(pattern + input_str )
    for val in z_result:
        # if value is greater than length of the pattern string
        # that means this index is starting position of substring
        # which is equal to pattern string
        if val >= len(pattern ):
            answer += 1
    return answer
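# Illustrative usage (not part of the original file): z_function returns, for each index,
# the length of the longest prefix of the string that also starts at that index, and
# find_pattern counts (possibly overlapping) occurrences via Z-values of pattern + text:
#
#     z_function("abacaba")           # -> [0, 0, 1, 0, 3, 0, 1]
#     find_pattern("aba", "abacaba")  # -> 2 (matches at offsets 0 and 4)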
if __name__ == "__main__":
import doctest
doctest.testmod() | 34 | 0 |
'''simple docstring'''
import argparse
import os
import jax as jnp
import numpy as onp
import torch
import torch.nn as nn
from music_spectrogram_diffusion import inference
from t5x import checkpoints
from diffusers import DDPMScheduler, OnnxRuntimeModel, SpectrogramDiffusionPipeline
from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, T5FilmDecoder
UpperCAmelCase__ :List[str] = """base_with_context"""
def load_notes_encoder (weights, model ) -> List[str]:
"""simple docstring"""
__lowerCamelCase : Optional[Any] = nn.Parameter(torch.FloatTensor(weights["""token_embedder"""]["""embedding"""] ) )
__lowerCamelCase : str = nn.Parameter(
torch.FloatTensor(weights["""Embed_0"""]["""embedding"""] ), requires_grad=_lowercase )
for lyr_num, lyr in enumerate(model.encoders ):
__lowerCamelCase : List[str] = weights[f"layers_{lyr_num}"]
__lowerCamelCase : Optional[Any] = nn.Parameter(
torch.FloatTensor(ly_weight["""pre_attention_layer_norm"""]["""scale"""] ) )
__lowerCamelCase : Optional[Any] = ly_weight["""attention"""]
__lowerCamelCase : List[Any] = nn.Parameter(torch.FloatTensor(attention_weights["""query"""]["""kernel"""].T ) )
__lowerCamelCase : Tuple = nn.Parameter(torch.FloatTensor(attention_weights["""key"""]["""kernel"""].T ) )
__lowerCamelCase : Any = nn.Parameter(torch.FloatTensor(attention_weights["""value"""]["""kernel"""].T ) )
__lowerCamelCase : Tuple = nn.Parameter(torch.FloatTensor(attention_weights["""out"""]["""kernel"""].T ) )
__lowerCamelCase : str = nn.Parameter(torch.FloatTensor(ly_weight["""pre_mlp_layer_norm"""]["""scale"""] ) )
__lowerCamelCase : List[str] = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wi_0"""]["""kernel"""].T ) )
__lowerCamelCase : Dict = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wi_1"""]["""kernel"""].T ) )
__lowerCamelCase : Optional[int] = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wo"""]["""kernel"""].T ) )
__lowerCamelCase : Optional[int] = nn.Parameter(torch.FloatTensor(weights["""encoder_norm"""]["""scale"""] ) )
return model
def load_continuous_encoder (weights, model ) -> Optional[int]:
"""simple docstring"""
__lowerCamelCase : Dict = nn.Parameter(torch.FloatTensor(weights["""input_proj"""]["""kernel"""].T ) )
__lowerCamelCase : Union[str, Any] = nn.Parameter(
torch.FloatTensor(weights["""Embed_0"""]["""embedding"""] ), requires_grad=_lowercase )
for lyr_num, lyr in enumerate(model.encoders ):
__lowerCamelCase : int = weights[f"layers_{lyr_num}"]
__lowerCamelCase : Optional[int] = ly_weight["""attention"""]
__lowerCamelCase : Optional[Any] = nn.Parameter(torch.FloatTensor(attention_weights["""query"""]["""kernel"""].T ) )
__lowerCamelCase : str = nn.Parameter(torch.FloatTensor(attention_weights["""key"""]["""kernel"""].T ) )
__lowerCamelCase : Dict = nn.Parameter(torch.FloatTensor(attention_weights["""value"""]["""kernel"""].T ) )
__lowerCamelCase : str = nn.Parameter(torch.FloatTensor(attention_weights["""out"""]["""kernel"""].T ) )
__lowerCamelCase : Union[str, Any] = nn.Parameter(
torch.FloatTensor(ly_weight["""pre_attention_layer_norm"""]["""scale"""] ) )
__lowerCamelCase : List[str] = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wi_0"""]["""kernel"""].T ) )
__lowerCamelCase : Union[str, Any] = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wi_1"""]["""kernel"""].T ) )
__lowerCamelCase : int = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wo"""]["""kernel"""].T ) )
__lowerCamelCase : Any = nn.Parameter(torch.FloatTensor(ly_weight["""pre_mlp_layer_norm"""]["""scale"""] ) )
__lowerCamelCase : Tuple = nn.Parameter(torch.FloatTensor(weights["""encoder_norm"""]["""scale"""] ) )
return model
def load_decoder (weights, model ) -> Dict:
"""simple docstring"""
__lowerCamelCase : List[Any] = nn.Parameter(torch.FloatTensor(weights["""time_emb_dense0"""]["""kernel"""].T ) )
__lowerCamelCase : Dict = nn.Parameter(torch.FloatTensor(weights["""time_emb_dense1"""]["""kernel"""].T ) )
__lowerCamelCase : Union[str, Any] = nn.Parameter(
torch.FloatTensor(weights["""Embed_0"""]["""embedding"""] ), requires_grad=_lowercase )
__lowerCamelCase : Union[str, Any] = nn.Parameter(
torch.FloatTensor(weights["""continuous_inputs_projection"""]["""kernel"""].T ) )
for lyr_num, lyr in enumerate(model.decoders ):
__lowerCamelCase : List[str] = weights[f"layers_{lyr_num}"]
__lowerCamelCase : Optional[int] = nn.Parameter(
torch.FloatTensor(ly_weight["""pre_self_attention_layer_norm"""]["""scale"""] ) )
__lowerCamelCase : List[str] = nn.Parameter(
torch.FloatTensor(ly_weight["""FiLMLayer_0"""]["""DenseGeneral_0"""]["""kernel"""].T ) )
__lowerCamelCase : List[str] = ly_weight["""self_attention"""]
__lowerCamelCase : List[str] = nn.Parameter(torch.FloatTensor(attention_weights["""query"""]["""kernel"""].T ) )
__lowerCamelCase : Optional[int] = nn.Parameter(torch.FloatTensor(attention_weights["""key"""]["""kernel"""].T ) )
__lowerCamelCase : str = nn.Parameter(torch.FloatTensor(attention_weights["""value"""]["""kernel"""].T ) )
__lowerCamelCase : int = nn.Parameter(torch.FloatTensor(attention_weights["""out"""]["""kernel"""].T ) )
__lowerCamelCase : Optional[Any] = ly_weight["""MultiHeadDotProductAttention_0"""]
__lowerCamelCase : List[str] = nn.Parameter(torch.FloatTensor(attention_weights["""query"""]["""kernel"""].T ) )
__lowerCamelCase : Optional[int] = nn.Parameter(torch.FloatTensor(attention_weights["""key"""]["""kernel"""].T ) )
__lowerCamelCase : List[str] = nn.Parameter(torch.FloatTensor(attention_weights["""value"""]["""kernel"""].T ) )
__lowerCamelCase : Union[str, Any] = nn.Parameter(torch.FloatTensor(attention_weights["""out"""]["""kernel"""].T ) )
__lowerCamelCase : List[str] = nn.Parameter(
torch.FloatTensor(ly_weight["""pre_cross_attention_layer_norm"""]["""scale"""] ) )
__lowerCamelCase : List[Any] = nn.Parameter(torch.FloatTensor(ly_weight["""pre_mlp_layer_norm"""]["""scale"""] ) )
__lowerCamelCase : Optional[Any] = nn.Parameter(
torch.FloatTensor(ly_weight["""FiLMLayer_1"""]["""DenseGeneral_0"""]["""kernel"""].T ) )
__lowerCamelCase : Optional[Any] = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wi_0"""]["""kernel"""].T ) )
__lowerCamelCase : List[str] = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wi_1"""]["""kernel"""].T ) )
__lowerCamelCase : int = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wo"""]["""kernel"""].T ) )
__lowerCamelCase : Tuple = nn.Parameter(torch.FloatTensor(weights["""decoder_norm"""]["""scale"""] ) )
__lowerCamelCase : Tuple = nn.Parameter(torch.FloatTensor(weights["""spec_out_dense"""]["""kernel"""].T ) )
return model
def main (args ) -> Any:
    """simple docstring"""
    ta_checkpoint = checkpoints.load_t5x_checkpoint(args.checkpoint_path )
    ta_checkpoint = jnp.tree_util.tree_map(onp.array, ta_checkpoint )
    gin_overrides = [
        """from __gin__ import dynamic_registration""",
        """from music_spectrogram_diffusion.models.diffusion import diffusion_utils""",
        """diffusion_utils.ClassifierFreeGuidanceConfig.eval_condition_weight = 2.0""",
        """diffusion_utils.DiffusionConfig.classifier_free_guidance = @diffusion_utils.ClassifierFreeGuidanceConfig()""",
    ]
    gin_file = os.path.join(args.checkpoint_path, """..""", """config.gin""" )
    gin_config = inference.parse_training_gin_file(gin_file, gin_overrides )
    synth_model = inference.InferenceModel(args.checkpoint_path, gin_config )
    scheduler = DDPMScheduler(beta_schedule="""squaredcos_cap_v2""", variance_type="""fixed_large""" )
    notes_encoder = SpectrogramNotesEncoder(
        max_length=synth_model.sequence_length["""inputs"""], vocab_size=synth_model.model.module.config.vocab_size, d_model=synth_model.model.module.config.emb_dim, dropout_rate=synth_model.model.module.config.dropout_rate, num_layers=synth_model.model.module.config.num_encoder_layers, num_heads=synth_model.model.module.config.num_heads, d_kv=synth_model.model.module.config.head_dim, d_ff=synth_model.model.module.config.mlp_dim, feed_forward_proj="""gated-gelu""", )
    continuous_encoder = SpectrogramContEncoder(
        input_dims=synth_model.audio_codec.n_dims, targets_context_length=synth_model.sequence_length["""targets_context"""], d_model=synth_model.model.module.config.emb_dim, dropout_rate=synth_model.model.module.config.dropout_rate, num_layers=synth_model.model.module.config.num_encoder_layers, num_heads=synth_model.model.module.config.num_heads, d_kv=synth_model.model.module.config.head_dim, d_ff=synth_model.model.module.config.mlp_dim, feed_forward_proj="""gated-gelu""", )
    decoder = T5FilmDecoder(
        input_dims=synth_model.audio_codec.n_dims, targets_length=synth_model.sequence_length["""targets_context"""], max_decoder_noise_time=synth_model.model.module.config.max_decoder_noise_time, d_model=synth_model.model.module.config.emb_dim, num_layers=synth_model.model.module.config.num_decoder_layers, num_heads=synth_model.model.module.config.num_heads, d_kv=synth_model.model.module.config.head_dim, d_ff=synth_model.model.module.config.mlp_dim, dropout_rate=synth_model.model.module.config.dropout_rate, )
    notes_encoder = load_notes_encoder(ta_checkpoint["""target"""]["""token_encoder"""], notes_encoder )
    continuous_encoder = load_continuous_encoder(ta_checkpoint["""target"""]["""continuous_encoder"""], continuous_encoder )
    decoder = load_decoder(ta_checkpoint["""target"""]["""decoder"""], decoder )
    melgan = OnnxRuntimeModel.from_pretrained("""kashif/soundstream_mel_decoder""" )
    pipe = SpectrogramDiffusionPipeline(
        notes_encoder=notes_encoder, continuous_encoder=continuous_encoder, decoder=decoder, scheduler=scheduler, melgan=melgan, )
if args.save:
pipe.save_pretrained(args.output_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--output_path""", default=None, type=str, required=True, help="""Path to the converted model.""")
parser.add_argument(
"""--save""", default=True, type=bool, required=False, help="""Whether to save the converted model or not."""
)
parser.add_argument(
"""--checkpoint_path""",
default=f'''{MODEL}/checkpoint_500000''',
type=str,
required=False,
help="""Path to the original jax model checkpoint.""",
)
    args = parser.parse_args()
main(args)
| 150 |
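A minimal usage sketch for the converted checkpoint (the path below is hypothetical; it assumes a diffusers install that ships the spectrogram diffusion pipeline and its extra dependencies):

from diffusers import DiffusionPipeline

# Reload whatever `pipe.save_pretrained(args.output_path)` wrote above.
pipe = DiffusionPipeline.from_pretrained("./converted_spectrogram_pipeline")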
"""simple docstring"""
import importlib.metadata
import warnings
from copy import deepcopy
from packaging import version
from ..utils import logging
from .import_utils import is_accelerate_available, is_bitsandbytes_available
if is_bitsandbytes_available():
import bitsandbytes as bnb
import torch
import torch.nn as nn
    from ..pytorch_utils import Conv1D
if is_accelerate_available():
from accelerate import init_empty_weights
from accelerate.utils import find_tied_parameters
logger = logging.get_logger(__name__)
def set_module_quantized_tensor_to_device(module ,tensor_name ,device ,value=None ,fp16_statistics=None ):
    """simple docstring"""
    if "." in tensor_name:
        splits = tensor_name.split('''.''' )
        for split in splits[:-1]:
            new_module = getattr(module ,split )
            if new_module is None:
                raise ValueError(f'{module} has no attribute {split}.' )
            module = new_module
        tensor_name = splits[-1]
    if tensor_name not in module._parameters and tensor_name not in module._buffers:
        raise ValueError(f'{module} does not have a parameter or a buffer named {tensor_name}.' )
    is_buffer = tensor_name in module._buffers
    old_value = getattr(module ,tensor_name )
    if old_value.device == torch.device('''meta''' ) and device not in ["meta", torch.device('''meta''' )] and value is None:
        raise ValueError(f'{tensor_name} is on the meta device, we need a `value` to put in on {device}.' )
    is_4bit = False
    is_8bit = False
    if is_buffer or not is_bitsandbytes_available():
        is_8bit = False
        is_4bit = False
    else:
        is_4bit = hasattr(bnb.nn ,'''Params4bit''' ) and isinstance(module._parameters[tensor_name] ,bnb.nn.Params4bit )
        is_8bit = isinstance(module._parameters[tensor_name] ,bnb.nn.Int8Params )
    if is_8bit or is_4bit:
        param = module._parameters[tensor_name]
        if param.device.type != "cuda":
            if value is None:
                new_value = old_value.to(device )
            elif isinstance(value ,torch.Tensor ):
                new_value = value.to('''cpu''' )
                if value.dtype == torch.int8:
                    is_8bit_serializable = version.parse(importlib.metadata.version('''bitsandbytes''' ) ) > version.parse(
                        '''0.37.2''' )
                    if not is_8bit_serializable:
                        raise ValueError(
                            '''Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. '''
                            '''Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`.''' )
            else:
                new_value = torch.tensor(value ,device='''cpu''' )
            # Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization.
            # Since weights are saved in the correct "orientation", we skip transposing when loading.
            if issubclass(module.source_cls ,Conv1D ) and fp16_statistics is None:
                new_value = new_value.T
            kwargs = old_value.__dict__
            if is_8bit:
                new_value = bnb.nn.Int8Params(new_value ,requires_grad=False ,**kwargs ).to(device )
            elif is_4bit:
                new_value = bnb.nn.Params4bit(new_value ,requires_grad=False ,**kwargs ).to(device )
            module._parameters[tensor_name] = new_value
            if fp16_statistics is not None:
                setattr(module.weight ,'''SCB''' ,fp16_statistics.to(device ) )
    else:
        if value is None:
            new_value = old_value.to(device )
        elif isinstance(value ,torch.Tensor ):
            new_value = value.to(device )
        else:
            new_value = torch.tensor(value ,device=device )
        if is_buffer:
            module._buffers[tensor_name] = new_value
        else:
            new_value = nn.Parameter(new_value ,requires_grad=old_value.requires_grad )
            module._parameters[tensor_name] = new_value
def _replace_with_bnb_linear(model ,modules_to_not_convert=None ,current_key_name=None ,quantization_config=None ,has_been_replaced=False ):
    """simple docstring"""
    for name, module in model.named_children():
        if current_key_name is None:
            current_key_name = []
        current_key_name.append(name )
        if (isinstance(module ,nn.Linear ) or isinstance(module ,Conv1D )) and name not in modules_to_not_convert:
            # Check if the current key is not in the `modules_to_not_convert`
            if not any(key in '''.'''.join(current_key_name ) for key in modules_to_not_convert ):
                with init_empty_weights():
                    if isinstance(module ,Conv1D ):
                        in_features, out_features = module.weight.shape
                    else:
                        in_features = module.in_features
                        out_features = module.out_features
                    if quantization_config.quantization_method() == "llm_int8":
                        model._modules[name] = bnb.nn.Linear8bitLt(
                            in_features ,out_features ,module.bias is not None ,has_fp16_weights=quantization_config.llm_int8_has_fp16_weight ,threshold=quantization_config.llm_int8_threshold ,)
                        has_been_replaced = True
                    else:
                        if (
                            quantization_config.llm_int8_skip_modules is not None
                            and name in quantization_config.llm_int8_skip_modules
                        ):
                            pass
                        else:
                            model._modules[name] = bnb.nn.Linear4bit(
                                in_features ,out_features ,module.bias is not None ,quantization_config.bnb_4bit_compute_dtype ,compress_statistics=quantization_config.bnb_4bit_use_double_quant ,quant_type=quantization_config.bnb_4bit_quant_type ,)
                            has_been_replaced = True
                    # Store the module class in case we need to transpose the weight later
                    model._modules[name].source_cls = type(module )
                    # Force requires grad to False to avoid unexpected errors
                    model._modules[name].requires_grad_(False )
        if len(list(module.children() ) ) > 0:
            _, has_been_replaced = _replace_with_bnb_linear(
                module ,modules_to_not_convert ,current_key_name ,quantization_config ,has_been_replaced=has_been_replaced ,)
        # Remove the last key for recursion
        current_key_name.pop(-1 )
    return model, has_been_replaced
def replace_with_bnb_linear(model ,modules_to_not_convert=None ,current_key_name=None ,quantization_config=None ):
    """simple docstring"""
    modules_to_not_convert = ['''lm_head'''] if modules_to_not_convert is None else modules_to_not_convert
    model, has_been_replaced = _replace_with_bnb_linear(
        model ,modules_to_not_convert ,current_key_name ,quantization_config )
    if not has_been_replaced:
        logger.warning(
            '''You are loading your model in 8bit or 4bit but no linear modules were found in your model.'''
            ''' Please double check your model architecture, or submit an issue on github if you think this is'''
            ''' a bug.''' )
    return model
def replace_8bit_linear(*args ,**kwargs ):
    """simple docstring"""
    warnings.warn(
        '''`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead''' ,FutureWarning ,)
    return replace_with_bnb_linear(*args ,**kwargs )
def set_module_8bit_tensor_to_device(*args ,**kwargs ):
    """simple docstring"""
    warnings.warn(
        '''`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use `set_module_quantized_tensor_to_device` instead''' ,FutureWarning ,)
    return set_module_quantized_tensor_to_device(*args ,**kwargs )
def get_keys_to_not_convert(model ):
    """simple docstring"""
    tied_model = deepcopy(model ) # this has 0 cost since it is done inside `init_empty_weights` context manager`
    tied_model.tie_weights()
    tied_params = find_tied_parameters(tied_model )
    # For compatibility with Accelerate < 0.18
    if isinstance(tied_params ,dict ):
        tied_keys = sum(list(tied_params.values() ) ,[] ) + list(tied_params.keys() )
    else:
        tied_keys = sum(tied_params ,[] )
    has_tied_params = len(tied_keys ) > 0
    # Check if it is a base model
    is_base_model = not hasattr(model ,model.base_model_prefix )
    # Ignore this for base models (BertModel, GPT2Model, etc.)
    if (not has_tied_params) and is_base_model:
        return []
    # otherwise they have an attached head
    list_modules = list(model.named_children() )
    list_last_module = [list_modules[-1][0]]
    # add last module together with tied weights
    intersection = set(list_last_module ) - set(tied_keys )
    list_untouched = list(set(tied_keys ) ) + list(intersection )
    # remove ".weight" from the keys
    names_to_remove = ['''.weight''', '''.bias''']
    filtered_module_names = []
    for name in list_untouched:
        for name_to_remove in names_to_remove:
            if name_to_remove in name:
                name = name.replace(name_to_remove ,'''''' )
        filtered_module_names.append(name )
    return filtered_module_names | 34 | 0 |
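The utilities above are internal plumbing; the supported entry point is a quantization config passed to `from_pretrained`, which calls `replace_with_bnb_linear` and `set_module_quantized_tensor_to_device` under the hood. A hedged end-to-end sketch (the model id is only an example; requires a CUDA machine with `bitsandbytes` installed):

from transformers import AutoModelForCausalLM, BitsAndBytesConfig

quantization_config = BitsAndBytesConfig(load_in_8bit=True)
model = AutoModelForCausalLM.from_pretrained(
    "facebook/opt-350m",  # example checkpoint
    quantization_config=quantization_config,
    device_map="auto",  # let accelerate place the quantized layers
)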
'''simple docstring'''
import datasets
from .evaluate import evaluate
_CITATION = '\\n@inproceedings{Rajpurkar2016SQuAD10,\n title={SQuAD: 100, 000+ Questions for Machine Comprehension of Text},\n author={Pranav Rajpurkar and Jian Zhang and Konstantin Lopyrev and Percy Liang},\n booktitle={EMNLP},\n year={2016}\n}\n'
_DESCRIPTION = '\nThis metric wraps the official scoring script for version 1 of the Stanford Question Answering Dataset (SQuAD).\n\nStanford Question Answering Dataset (SQuAD) is a reading comprehension dataset, consisting of questions posed by\ncrowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span,\nfrom the corresponding reading passage, or the question might be unanswerable.\n'
_KWARGS_DESCRIPTION = '\nComputes SQuAD scores (F1 and EM).\nArgs:\n    predictions: List of question-answers dictionaries with the following key-values:\n        - \'id\': id of the question-answer pair as given in the references (see below)\n        - \'prediction_text\': the text of the answer\n    references: List of question-answers dictionaries with the following key-values:\n        - \'id\': id of the question-answer pair (see above),\n        - \'answers\': a Dict in the SQuAD dataset format\n            {\n                \'text\': list of possible texts for the answer, as a list of strings\n                \'answer_start\': list of start positions for the answer, as a list of ints\n            }\n            Note that answer_start values are not taken into account to compute the metric.\nReturns:\n    \'exact_match\': Exact match (the normalized answer exactly match the gold answer)\n    \'f1\': The F-score of predicted tokens versus the gold answer\nExamples:\n\n    >>> predictions = [{\'prediction_text\': \'1976\', \'id\': \'56e10a3be3433e1400422b22\'}]\n    >>> references = [{\'answers\': {\'answer_start\': [97], \'text\': [\'1976\']}, \'id\': \'56e10a3be3433e1400422b22\'}]\n    >>> squad_metric = datasets.load_metric("squad")\n    >>> results = squad_metric.compute(predictions=predictions, references=references)\n    >>> print(results)\n    {\'exact_match\': 100.0, \'f1\': 100.0}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class __snake_case ( datasets.Metric):
    def _info( self ):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
{
'predictions': {'id': datasets.Value('string' ), 'prediction_text': datasets.Value('string' )},
'references': {
'id': datasets.Value('string' ),
'answers': datasets.features.Sequence(
{
'text': datasets.Value('string' ),
'answer_start': datasets.Value('int32' ),
} ),
},
} ), codebase_urls=['https://rajpurkar.github.io/SQuAD-explorer/'], reference_urls=['https://rajpurkar.github.io/SQuAD-explorer/'], )
    def _compute( self, predictions, references ):
        """simple docstring"""
        pred_dict = {prediction['id']: prediction['prediction_text'] for prediction in predictions}
        dataset = [
            {
                'paragraphs': [
                    {
                        'qas': [
                            {
                                'answers': [{'text': answer_text} for answer_text in ref['answers']['text']],
                                'id': ref['id'],
                            }
                            for ref in references
                        ]
                    }
                ]
            }
        ]
        score = evaluate(dataset=dataset, predictions=pred_dict )
        return score
| 320 |
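Mirroring the docstring example above, a quick self-contained check of the metric:

import datasets

squad_metric = datasets.load_metric("squad")
predictions = [{"prediction_text": "1976", "id": "56e10a3be3433e1400422b22"}]
references = [{"answers": {"answer_start": [97], "text": ["1976"]}, "id": "56e10a3be3433e1400422b22"}]
print(squad_metric.compute(predictions=predictions, references=references))  # {'exact_match': 100.0, 'f1': 100.0}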
"""simple docstring"""
from random import randint
from tempfile import TemporaryFile
import numpy as np
def _in_place_quick_sort(a ,start ,end ):
    """simple docstring"""
    count = 0
    if start < end:
        pivot = randint(start ,end )
        temp = a[end]
        a[end] = a[pivot]
        a[pivot] = temp
        p, count = _in_place_partition(a ,start ,end )
        count += _in_place_quick_sort(a ,start ,p - 1 )
        count += _in_place_quick_sort(a ,p + 1 ,end )
    return count
def _in_place_partition(a ,start ,end ):
    """simple docstring"""
    count = 0
    pivot = randint(start ,end )
    temp = a[end]
    a[end] = a[pivot]
    a[pivot] = temp
    new_pivot_index = start - 1
    for index in range(start ,end ):
        count += 1
        if a[index] < a[end]: # check if current val is less than pivot value
            new_pivot_index = new_pivot_index + 1
            temp = a[new_pivot_index]
            a[new_pivot_index] = a[index]
            a[index] = temp
    temp = a[new_pivot_index + 1]
    a[new_pivot_index + 1] = a[end]
    a[end] = temp
    return new_pivot_index + 1, count
outfile = TemporaryFile()
p = 100 # 100 elements are to be sorted
mu, sigma = 0, 1 # mean and standard deviation
X = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print('The array is')
print(X)
outfile.seek(0) # using the same array
M = np.load(outfile)
r = len(M) - 1
z = _in_place_quick_sort(M, 0, r)
print(
    'No of Comparisons for 100 elements selected from a standard normal distribution '
    'is :'
)
print(z) | 34 | 0 |
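A small deterministic sanity check for the sort itself (the comparison count varies from run to run because the pivot is chosen at random):

data = [5, 2, 9, 1, 7]
comparisons = _in_place_quick_sort(data, 0, len(data) - 1)
assert data == [1, 2, 5, 7, 9]
print(f"sorted with {comparisons} comparisons")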
'''simple docstring'''
from ..utils import DummyObject, requires_backends
# Placeholder (dummy) classes that raise an import error when sentencepiece is unavailable.
class UpperCAmelCase__ ( metaclass=DummyObject ):
    _backends = ['sentencepiece']
    def __init__( self ,*args ,**kwargs ):
        requires_backends(self ,["sentencepiece"] )
class UpperCAmelCase__ ( metaclass=DummyObject ):
    _backends = ['sentencepiece']
    def __init__( self ,*args ,**kwargs ):
        requires_backends(self ,["sentencepiece"] )
class UpperCAmelCase__ ( metaclass=DummyObject ):
    _backends = ['sentencepiece']
    def __init__( self ,*args ,**kwargs ):
        requires_backends(self ,["sentencepiece"] )
class UpperCAmelCase__ ( metaclass=DummyObject ):
    _backends = ['sentencepiece']
    def __init__( self ,*args ,**kwargs ):
        requires_backends(self ,["sentencepiece"] )
class UpperCAmelCase__ ( metaclass=DummyObject ):
    _backends = ['sentencepiece']
    def __init__( self ,*args ,**kwargs ):
        requires_backends(self ,["sentencepiece"] )
class UpperCAmelCase__ ( metaclass=DummyObject ):
    _backends = ['sentencepiece']
    def __init__( self ,*args ,**kwargs ):
        requires_backends(self ,["sentencepiece"] )
class UpperCAmelCase__ ( metaclass=DummyObject ):
    _backends = ['sentencepiece']
    def __init__( self ,*args ,**kwargs ):
        requires_backends(self ,["sentencepiece"] )
class UpperCAmelCase__ ( metaclass=DummyObject ):
    _backends = ['sentencepiece']
    def __init__( self ,*args ,**kwargs ):
        requires_backends(self ,["sentencepiece"] )
class UpperCAmelCase__ ( metaclass=DummyObject ):
    _backends = ['sentencepiece']
    def __init__( self ,*args ,**kwargs ):
        requires_backends(self ,["sentencepiece"] )
class UpperCAmelCase__ ( metaclass=DummyObject ):
    _backends = ['sentencepiece']
    def __init__( self ,*args ,**kwargs ):
        requires_backends(self ,["sentencepiece"] )
class UpperCAmelCase__ ( metaclass=DummyObject ):
    _backends = ['sentencepiece']
    def __init__( self ,*args ,**kwargs ):
        requires_backends(self ,["sentencepiece"] )
class UpperCAmelCase__ ( metaclass=DummyObject ):
    _backends = ['sentencepiece']
    def __init__( self ,*args ,**kwargs ):
        requires_backends(self ,["sentencepiece"] )
class UpperCAmelCase__ ( metaclass=DummyObject ):
    _backends = ['sentencepiece']
    def __init__( self ,*args ,**kwargs ):
        requires_backends(self ,["sentencepiece"] )
class UpperCAmelCase__ ( metaclass=DummyObject ):
    _backends = ['sentencepiece']
    def __init__( self ,*args ,**kwargs ):
        requires_backends(self ,["sentencepiece"] )
class UpperCAmelCase__ ( metaclass=DummyObject ):
    _backends = ['sentencepiece']
    def __init__( self ,*args ,**kwargs ):
        requires_backends(self ,["sentencepiece"] )
class UpperCAmelCase__ ( metaclass=DummyObject ):
    _backends = ['sentencepiece']
    def __init__( self ,*args ,**kwargs ):
        requires_backends(self ,["sentencepiece"] )
class UpperCAmelCase__ ( metaclass=DummyObject ):
    _backends = ['sentencepiece']
    def __init__( self ,*args ,**kwargs ):
        requires_backends(self ,["sentencepiece"] )
class UpperCAmelCase__ ( metaclass=DummyObject ):
    _backends = ['sentencepiece']
    def __init__( self ,*args ,**kwargs ):
        requires_backends(self ,["sentencepiece"] )
class UpperCAmelCase__ ( metaclass=DummyObject ):
    _backends = ['sentencepiece']
    def __init__( self ,*args ,**kwargs ):
        requires_backends(self ,["sentencepiece"] )
class UpperCAmelCase__ ( metaclass=DummyObject ):
    _backends = ['sentencepiece']
    def __init__( self ,*args ,**kwargs ):
        requires_backends(self ,["sentencepiece"] )
class UpperCAmelCase__ ( metaclass=DummyObject ):
    _backends = ['sentencepiece']
    def __init__( self ,*args ,**kwargs ):
        requires_backends(self ,["sentencepiece"] )
class UpperCAmelCase__ ( metaclass=DummyObject ):
    _backends = ['sentencepiece']
    def __init__( self ,*args ,**kwargs ):
        requires_backends(self ,["sentencepiece"] )
class UpperCAmelCase__ ( metaclass=DummyObject ):
    _backends = ['sentencepiece']
    def __init__( self ,*args ,**kwargs ):
        requires_backends(self ,["sentencepiece"] )
class UpperCAmelCase__ ( metaclass=DummyObject ):
    _backends = ['sentencepiece']
    def __init__( self ,*args ,**kwargs ):
        requires_backends(self ,["sentencepiece"] )
class UpperCAmelCase__ ( metaclass=DummyObject ):
    _backends = ['sentencepiece']
    def __init__( self ,*args ,**kwargs ):
        requires_backends(self ,["sentencepiece"] )
class UpperCAmelCase__ ( metaclass=DummyObject ):
    _backends = ['sentencepiece']
    def __init__( self ,*args ,**kwargs ):
        requires_backends(self ,["sentencepiece"] )
class UpperCAmelCase__ ( metaclass=DummyObject ):
    _backends = ['sentencepiece']
    def __init__( self ,*args ,**kwargs ):
        requires_backends(self ,["sentencepiece"] )
class UpperCAmelCase__ ( metaclass=DummyObject ):
    _backends = ['sentencepiece']
    def __init__( self ,*args ,**kwargs ):
        requires_backends(self ,["sentencepiece"] )
class UpperCAmelCase__ ( metaclass=DummyObject ):
    _backends = ['sentencepiece']
    def __init__( self ,*args ,**kwargs ):
        requires_backends(self ,["sentencepiece"] )
class UpperCAmelCase__ ( metaclass=DummyObject ):
    _backends = ['sentencepiece']
    def __init__( self ,*args ,**kwargs ):
        requires_backends(self ,["sentencepiece"] )
class UpperCAmelCase__ ( metaclass=DummyObject ):
    _backends = ['sentencepiece']
    def __init__( self ,*args ,**kwargs ):
        requires_backends(self ,["sentencepiece"] ) | 44 |
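What these placeholders buy you: on a machine without sentencepiece, instantiating one raises a readable ImportError from `requires_backends` instead of a late, opaque failure. A sketch:

try:
    UpperCAmelCase__()  # any of the dummy classes above
except ImportError as err:
    print(err)  # the message points the user at `pip install sentencepiece`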
"""simple docstring"""
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
check_dummies.PATH_TO_TRANSFORMERS = os.path.join(git_repo_path, 'src', 'transformers')
DUMMY_CONSTANT = '\n{0} = None\n'
DUMMY_CLASS = '\nclass {0}(metaclass=DummyObject):\n    _backends = {1}\n\n    def __init__(self, *args, **kwargs):\n        requires_backends(self, {1})\n'
DUMMY_FUNCTION = '\ndef {0}(*args, **kwargs):\n    requires_backends({0}, {1})\n'
class snake_case_ ( unittest.TestCase ):
    """simple docstring"""
    def test_find_backend( self) -> List[Any]:
        no_backend = find_backend(''' _import_structure["models.albert"].append("AlbertTokenizerFast")''')
        self.assertIsNone(no_backend)
        simple_backend = find_backend(''' if not is_tokenizers_available():''')
        self.assertEqual(simple_backend , '''tokenizers''')
        backend_with_underscore = find_backend(''' if not is_tensorflow_text_available():''')
        self.assertEqual(backend_with_underscore , '''tensorflow_text''')
        double_backend = find_backend(''' if not (is_sentencepiece_available() and is_tokenizers_available()):''')
        self.assertEqual(double_backend , '''sentencepiece_and_tokenizers''')
        double_backend_with_underscore = find_backend(
            ''' if not (is_sentencepiece_available() and is_tensorflow_text_available()):''')
        self.assertEqual(double_backend_with_underscore , '''sentencepiece_and_tensorflow_text''')
        triple_backend = find_backend(
            ''' if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):''')
        self.assertEqual(triple_backend , '''sentencepiece_and_tokenizers_and_vision''')
    def test_read_init( self) -> int:
        objects = read_init()
        # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
        self.assertIn('''torch''' , objects)
        self.assertIn('''tensorflow_text''' , objects)
        self.assertIn('''sentencepiece_and_tokenizers''' , objects)
        # Likewise, we can't assert on the exact content of a key
        self.assertIn('''BertModel''' , objects['''torch'''])
        self.assertIn('''TFBertModel''' , objects['''tf'''])
        self.assertIn('''FlaxBertModel''' , objects['''flax'''])
        self.assertIn('''BertModel''' , objects['''torch'''])
        self.assertIn('''TFBertTokenizer''' , objects['''tensorflow_text'''])
        self.assertIn('''convert_slow_tokenizer''' , objects['''sentencepiece_and_tokenizers'''])
    def test_create_dummy_object( self) -> Optional[int]:
        dummy_constant = create_dummy_object('''CONSTANT''' , '''\'torch\'''')
        self.assertEqual(dummy_constant , '''\nCONSTANT = None\n''')
        dummy_function = create_dummy_object('''function''' , '''\'torch\'''')
        self.assertEqual(
            dummy_function , '''\ndef function(*args, **kwargs):\n    requires_backends(function, \'torch\')\n''')
        expected_dummy_class = '''
class FakeClass(metaclass=DummyObject):
    _backends = 'torch'

    def __init__(self, *args, **kwargs):
        requires_backends(self, 'torch')
'''
        dummy_class = create_dummy_object('''FakeClass''' , '''\'torch\'''')
        self.assertEqual(dummy_class , expected_dummy_class)
    def test_create_dummy_files( self) -> int:
        expected_dummy_pytorch_file = '''# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends


CONSTANT = None


def function(*args, **kwargs):
    requires_backends(function, ["torch"])


class FakeClass(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
'''
        dummy_files = create_dummy_files({'''torch''': ['''CONSTANT''', '''function''', '''FakeClass''']})
        self.assertEqual(dummy_files['''torch'''] , expected_dummy_pytorch_file) | 34 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {'configuration_opt': ['OPT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'OPTConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_opt'] = [
        'OPT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'OPTForCausalLM',
        'OPTModel',
        'OPTPreTrainedModel',
        'OPTForSequenceClassification',
        'OPTForQuestionAnswering',
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_opt'] = ['TFOPTForCausalLM', 'TFOPTModel', 'TFOPTPreTrainedModel']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_opt'] = [
        'FlaxOPTForCausalLM',
        'FlaxOPTModel',
        'FlaxOPTPreTrainedModel',
    ]
if TYPE_CHECKING:
from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_opt import (
OPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OPTForCausalLM,
OPTForQuestionAnswering,
OPTForSequenceClassification,
OPTModel,
OPTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 623 |
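With the lazy structure above, importing the package stays cheap: the modeling files (and the torch/tf/flax stacks behind them) load only when one of the listed names is first accessed. A usage sketch, assuming torch is installed:

from transformers import OPTConfig, OPTModel  # resolved through the _LazyModule

config = OPTConfig()
model = OPTModel(config)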
"""simple docstring"""
import argparse
import requests
import torch
from PIL import Image
from transformers import ViTMAEConfig, ViTMAEForPreTraining, ViTMAEImageProcessor
def rename_key(name ):
    """simple docstring"""
    if "cls_token" in name:
        name = name.replace('''cls_token''' ,'''vit.embeddings.cls_token''' )
    if "mask_token" in name:
        name = name.replace('''mask_token''' ,'''decoder.mask_token''' )
    if "decoder_pos_embed" in name:
        name = name.replace('''decoder_pos_embed''' ,'''decoder.decoder_pos_embed''' )
    if "pos_embed" in name and "decoder" not in name:
        name = name.replace('''pos_embed''' ,'''vit.embeddings.position_embeddings''' )
    if "patch_embed.proj" in name:
        name = name.replace('''patch_embed.proj''' ,'''vit.embeddings.patch_embeddings.projection''' )
    if "patch_embed.norm" in name:
        name = name.replace('''patch_embed.norm''' ,'''vit.embeddings.norm''' )
    if "decoder_blocks" in name:
        name = name.replace('''decoder_blocks''' ,'''decoder.decoder_layers''' )
    if "blocks" in name:
        name = name.replace('''blocks''' ,'''vit.encoder.layer''' )
    if "attn.proj" in name:
        name = name.replace('''attn.proj''' ,'''attention.output.dense''' )
    if "attn" in name:
        name = name.replace('''attn''' ,'''attention.self''' )
    if "norm1" in name:
        name = name.replace('''norm1''' ,'''layernorm_before''' )
    if "norm2" in name:
        name = name.replace('''norm2''' ,'''layernorm_after''' )
    if "mlp.fc1" in name:
        name = name.replace('''mlp.fc1''' ,'''intermediate.dense''' )
    if "mlp.fc2" in name:
        name = name.replace('''mlp.fc2''' ,'''output.dense''' )
    if "decoder_embed" in name:
        name = name.replace('''decoder_embed''' ,'''decoder.decoder_embed''' )
    if "decoder_norm" in name:
        name = name.replace('''decoder_norm''' ,'''decoder.decoder_norm''' )
    if "decoder_pred" in name:
        name = name.replace('''decoder_pred''' ,'''decoder.decoder_pred''' )
    if "norm.weight" in name and "decoder" not in name:
        name = name.replace('''norm.weight''' ,'''vit.layernorm.weight''' )
    if "norm.bias" in name and "decoder" not in name:
        name = name.replace('''norm.bias''' ,'''vit.layernorm.bias''' )
    return name
def convert_state_dict(orig_state_dict ,config ):
    """simple docstring"""
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key )
        if "qkv" in key:
            key_split = key.split('''.''' )
            layer_num = int(key_split[1] )
            if "decoder_blocks" in key:
                dim = config.decoder_hidden_size
                prefix = '''decoder.decoder_layers.'''
            else:
                dim = config.hidden_size
                prefix = '''vit.encoder.layer.'''
            # Split the fused qkv projection into separate query/key/value tensors.
            if "weight" in key:
                orig_state_dict[f'{prefix}{layer_num}.attention.attention.query.weight'] = val[:dim, :]
                orig_state_dict[f'{prefix}{layer_num}.attention.attention.key.weight'] = val[dim : dim * 2, :]
                orig_state_dict[f'{prefix}{layer_num}.attention.attention.value.weight'] = val[-dim:, :]
            elif "bias" in key:
                orig_state_dict[f'{prefix}{layer_num}.attention.attention.query.bias'] = val[:dim]
                orig_state_dict[f'{prefix}{layer_num}.attention.attention.key.bias'] = val[dim : dim * 2]
                orig_state_dict[f'{prefix}{layer_num}.attention.attention.value.bias'] = val[-dim:]
        else:
            orig_state_dict[rename_key(key )] = val
    return orig_state_dict
def convert_vit_mae_checkpoint(checkpoint_url ,pytorch_dump_folder_path ):
    """simple docstring"""
    config = ViTMAEConfig()
    if "large" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
    elif "huge" in checkpoint_url:
        config.patch_size = 14
        config.hidden_size = 1280
        config.intermediate_size = 5120
        config.num_hidden_layers = 32
        config.num_attention_heads = 16
    model = ViTMAEForPreTraining(config )
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url ,map_location='''cpu''' )['''model''']
    new_state_dict = convert_state_dict(state_dict ,config )
    model.load_state_dict(new_state_dict )
    model.eval()
    url = '''https://user-images.githubusercontent.com/11435359/147738734-196fd92f-9260-48d5-ba7e-bf103d29364d.jpg'''
    image = Image.open(requests.get(url ,stream=True ).raw )
    image_processor = ViTMAEImageProcessor(size=config.image_size )
    inputs = image_processor(images=image ,return_tensors='''pt''' )
    # forward pass
    torch.manual_seed(2 )
    outputs = model(**inputs )
    logits = outputs.logits
    if "large" in checkpoint_url:
        expected_slice = torch.tensor(
            [[-0.7309, -0.7128, -1.0169], [-1.0161, -0.9058, -1.1878], [-1.0478, -0.9411, -1.1911]] )
    elif "huge" in checkpoint_url:
        expected_slice = torch.tensor(
            [[-1.1599, -0.9199, -1.2221], [-1.1952, -0.9269, -1.2307], [-1.2143, -0.9337, -1.2262]] )
    else:
        expected_slice = torch.tensor(
            [[-0.9192, -0.8481, -1.1259], [-1.1349, -1.0034, -1.2599], [-1.1757, -1.0429, -1.2726]] )
    # verify logits
    assert torch.allclose(logits[0, :3, :3] ,expected_slice ,atol=1e-4 )
    print(f'Saving model to {pytorch_dump_folder_path}' )
    model.save_pretrained(pytorch_dump_folder_path )
    print(f'Saving image processor to {pytorch_dump_folder_path}' )
    image_processor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--checkpoint_url',
        default='https://dl.fbaipublicfiles.com/mae/visualize/mae_visualize_vit_base.pth',
        type=str,
        help='URL of the checkpoint you\'d like to convert.',
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
    )
    args = parser.parse_args()
convert_vit_mae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path) | 34 | 0 |
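A hedged sketch of reloading the converted weights (the folder is whatever was passed as --pytorch_dump_folder_path; the path below is hypothetical):

from transformers import ViTMAEForPreTraining, ViTMAEImageProcessor

model = ViTMAEForPreTraining.from_pretrained("./vit-mae-base")
image_processor = ViTMAEImageProcessor.from_pretrained("./vit-mae-base")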
'''simple docstring'''
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
if is_datasets_available():
from datasets import load_dataset
global_rng = random.Random()
def floats_list(shape , scale=1.0 , rng=None , name=None ):
    '''simple docstring'''
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0] ):
        values.append([] )
        for _ in range(shape[1] ):
            values[-1].append(rng.random() * scale )
    return values
class TvltFeatureExtractionTester(unittest.TestCase ):
    def __init__(self , parent , batch_size=7 , min_seq_length=400 , max_seq_length=2000 , spectrogram_length=2048 , feature_size=128 , num_audio_channels=1 , hop_length=512 , chunk_length=30 , sampling_rate=44100 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.spectrogram_length = spectrogram_length
        self.feature_size = feature_size
        self.num_audio_channels = num_audio_channels
        self.hop_length = hop_length
        self.chunk_length = chunk_length
        self.sampling_rate = sampling_rate
    def prepare_feat_extract_dict(self ):
        return {
            "spectrogram_length": self.spectrogram_length,
            "feature_size": self.feature_size,
            "num_audio_channels": self.num_audio_channels,
            "hop_length": self.hop_length,
            "chunk_length": self.chunk_length,
            "sampling_rate": self.sampling_rate,
        }
    def prepare_inputs_for_common(self , equal_length=False , numpify=False ):
        def _flatten(list_of_lists ):
            return list(itertools.chain(*list_of_lists ) )
        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size) )
                for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
            ]
        if numpify:
            speech_inputs = [np.asarray(x ) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class TvltFeatureExtractionTest(SequenceFeatureExtractionTestMixin , unittest.TestCase ):
    feature_extraction_class = TvltFeatureExtractor
    def setUp(self ):
        self.feat_extract_tester = TvltFeatureExtractionTester(self )
    def test_feat_extract_properties(self ):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict )
        self.assertTrue(hasattr(feature_extractor , """spectrogram_length""" ) )
        self.assertTrue(hasattr(feature_extractor , """feature_size""" ) )
        self.assertTrue(hasattr(feature_extractor , """num_audio_channels""" ) )
        self.assertTrue(hasattr(feature_extractor , """hop_length""" ) )
        self.assertTrue(hasattr(feature_extractor , """chunk_length""" ) )
        self.assertTrue(hasattr(feature_extractor , """sampling_rate""" ) )
    def test_feat_extract_from_and_save_pretrained(self ):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname )[0]
            check_json_file_has_correct_format(saved_file )
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname )
            dict_first = feat_extract_first.to_dict()
            dict_second = feat_extract_second.to_dict()
            mel_first = dict_first.pop("""mel_filters""" )
            mel_second = dict_second.pop("""mel_filters""" )
            self.assertTrue(np.allclose(mel_first , mel_second ) )
            self.assertEqual(dict_first , dict_second )
    def test_feat_extract_to_json_file(self ):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname , """feat_extract.json""" )
            feat_extract_first.to_json_file(json_file_path )
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path )
            dict_first = feat_extract_first.to_dict()
            dict_second = feat_extract_second.to_dict()
            mel_first = dict_first.pop("""mel_filters""" )
            mel_second = dict_second.pop("""mel_filters""" )
            self.assertTrue(np.allclose(mel_first , mel_second ) )
            self.assertEqual(dict_first , dict_second )
    def test_call(self ):
        # Initialize feature_extractor
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict )
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
        np_speech_inputs = [np.asarray(speech_input ) for speech_input in speech_inputs]
        # Test not batched input
        encoded_audios = feature_extractor(np_speech_inputs[0] , return_tensors="""np""" , sampling_rate=44100 ).audio_values
        self.assertTrue(encoded_audios.ndim == 4 )
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
        # Test batched
        encoded_audios = feature_extractor(np_speech_inputs , return_tensors="""np""" , sampling_rate=44100 ).audio_values
        self.assertTrue(encoded_audios.ndim == 4 )
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
        # Test audio masking
        encoded_audios = feature_extractor(
            np_speech_inputs , return_tensors="""np""" , sampling_rate=44100 , mask_audio=True ).audio_values
        self.assertTrue(encoded_audios.ndim == 4 )
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x) )[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs )
        encoded_audios = feature_extractor(np_speech_inputs , return_tensors="""np""" , sampling_rate=44100 ).audio_values
        self.assertTrue(encoded_audios.ndim == 4 )
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
    def _load_datasamples(self , num_samples ):
        ds = load_dataset("""hf-internal-testing/librispeech_asr_dummy""" , """clean""" , split="""validation""" )
        # automatic decoding with librispeech
        speech_samples = ds.sort("""id""" ).select(range(num_samples ) )[:num_samples]["""audio"""]
        return [x["array"] for x in speech_samples]
    def test_integration(self ):
        input_speech = self._load_datasamples(1 )
        feature_extractor = TvltFeatureExtractor()
        audio_values = feature_extractor(input_speech , return_tensors="""pt""" ).audio_values
        self.assertEquals(audio_values.shape , (1, 1, 192, 128) )
        expected_slice = torch.tensor([[-0.30_32, -0.27_08], [-0.44_34, -0.40_07]] )
        self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2] , expected_slice , atol=1E-4 ) ) | 667 |
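Outside the test harness, the extractor can be exercised on synthetic audio; a small sketch grounded in the calls above (random noise stands in for speech):

import numpy as np
from transformers import TvltFeatureExtractor

audio = np.random.randn(44100).astype(np.float32)  # one second at 44.1 kHz
feature_extractor = TvltFeatureExtractor()
features = feature_extractor(audio, return_tensors="np", sampling_rate=44100)
print(features.audio_values.shape)  # (batch, channels, time, feature_size)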
"""simple docstring"""
import unittest
import torch
from torch import nn
from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory
def raise_fake_out_of_memory():
    """simple docstring"""
    raise RuntimeError('''CUDA out of memory.''' )
class ModelForTest(nn.Module ):
    """simple docstring"""
    def __init__( self) -> Any:
        super().__init__()
        self.linear1 = nn.Linear(3 , 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4 , 5)
    def forward( self , x) -> Union[str, Any]:
        return self.linear2(self.batchnorm(self.linear1(x)))
class MemoryTest(unittest.TestCase ):
    """simple docstring"""
    def test_memory_implicit( self) -> List[Any]:
        batch_sizes = []
        @find_executable_batch_size(starting_batch_size=1_2_8)
        def mock_training_loop_function(batch_size):
            nonlocal batch_sizes
            batch_sizes.append(batch_size)
            if batch_size != 8:
                raise_fake_out_of_memory()
        mock_training_loop_function()
        self.assertListEqual(batch_sizes , [1_2_8, 6_4, 3_2, 1_6, 8])
    def test_memory_explicit( self) -> Optional[Any]:
        batch_sizes = []
        @find_executable_batch_size(starting_batch_size=1_2_8)
        def mock_training_loop_function(batch_size , arg1):
            nonlocal batch_sizes
            batch_sizes.append(batch_size)
            if batch_size != 8:
                raise_fake_out_of_memory()
            return batch_size, arg1
        bs , arg1 = mock_training_loop_function('''hello''')
        self.assertListEqual(batch_sizes , [1_2_8, 6_4, 3_2, 1_6, 8])
        self.assertListEqual([bs, arg1] , [8, '''hello'''])
    def test_start_zero( self) -> Tuple:
        @find_executable_batch_size(starting_batch_size=0)
        def mock_training_loop_function(batch_size):
            pass
        with self.assertRaises(RuntimeError) as cm:
            mock_training_loop_function()
        self.assertIn('''No executable batch size found, reached zero.''' , cm.exception.args[0])
    def test_approach_zero( self) -> List[Any]:
        @find_executable_batch_size(starting_batch_size=1_6)
        def mock_training_loop_function(batch_size):
            if batch_size > 0:
                raise_fake_out_of_memory()
            pass
        with self.assertRaises(RuntimeError) as cm:
            mock_training_loop_function()
        self.assertIn('''No executable batch size found, reached zero.''' , cm.exception.args[0])
    def test_verbose_guard( self) -> Union[str, Any]:
        @find_executable_batch_size(starting_batch_size=1_2_8)
        def mock_training_loop_function(batch_size , arg1 , arg2):
            if batch_size != 8:
                raise_fake_out_of_memory()
        with self.assertRaises(TypeError) as cm:
            mock_training_loop_function(1_2_8 , '''hello''' , '''world''')
        self.assertIn('''Batch size was passed into `f`''' , cm.exception.args[0])
        self.assertIn('''`f(arg1=\'hello\', arg2=\'world\')''' , cm.exception.args[0])
    def test_any_other_error( self) -> Dict:
        @find_executable_batch_size(starting_batch_size=1_6)
        def mock_training_loop_function(batch_size):
            raise ValueError('''Oops, we had an error!''')
        with self.assertRaises(ValueError) as cm:
            mock_training_loop_function()
        self.assertIn('''Oops, we had an error!''' , cm.exception.args[0])
    @require_cuda
    def test_release_memory( self) -> Optional[int]:
        starting_memory = torch.cuda.memory_allocated()
        model = ModelForTest()
        model.cuda()
        self.assertGreater(torch.cuda.memory_allocated() , starting_memory)
        model = release_memory(model)
        self.assertEqual(torch.cuda.memory_allocated() , starting_memory) | 34 | 0 |
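The retry pattern these tests exercise, in the shape it is normally used (a sketch; the model/dataloader setup is elided):

from accelerate.utils.memory import find_executable_batch_size

@find_executable_batch_size(starting_batch_size=64)
def train(batch_size):
    # Build dataloaders/model with `batch_size` here; a CUDA OOM RuntimeError
    # raised inside makes the decorator retry with batch_size // 2.
    print(f"trying batch_size={batch_size}")

train()  # called with no argument: the decorator injects batch_size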
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = """▁"""
VOCAB_FILES_NAMES = {"""vocab_file""": """sentencepiece.bpe.model""", """monolingual_vocab_file""": """dict.txt"""}
PRETRAINED_VOCAB_FILES_MAP = {
    """vocab_file""": {
        """vinai/bartpho-syllable""": """https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model""",
    },
    """monolingual_vocab_file""": {
        """vinai/bartpho-syllable""": """https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt""",
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"""vinai/bartpho-syllable""": 1024}
class BartphoTokenizer( PreTrainedTokenizer ):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["""input_ids""", """attention_mask"""]
    def __init__( self , vocab_file , monolingual_vocab_file , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , sp_model_kwargs = None , **kwargs , ):
        '''simple docstring'''
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False) if isinstance(mask_token , str) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , mask_token=mask_token , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self.vocab_file = vocab_file
        self.monolingual_vocab_file = monolingual_vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        # Load the reduced vocab
        # Keep order of special tokens for backward compatibility
        self.fairseq_tokens_to_ids = {}
        cnt = 0
        for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
            if str(token) not in self.fairseq_tokens_to_ids:
                self.fairseq_tokens_to_ids[str(token)] = cnt
                cnt += 1
        with open(monolingual_vocab_file , 'r' , encoding='utf-8') as f:
            for line in f.readlines():
                token = line.strip().split()[0]
                self.fairseq_tokens_to_ids[token] = len(self.fairseq_tokens_to_ids)
        if str(mask_token) not in self.fairseq_tokens_to_ids:
            self.fairseq_tokens_to_ids[str(mask_token)] = len(self.fairseq_tokens_to_ids)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def __getstate__( self):
        '''simple docstring'''
        state = self.__dict__.copy()
        state['sp_model'] = None
        state['sp_model_proto'] = self.sp_model.serialized_model_proto()
        return state
    def __setstate__( self , d):
        '''simple docstring'''
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , 'sp_model_kwargs'):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)
    def build_inputs_with_special_tokens( self , token_ids_a , token_ids_b = None):
        '''simple docstring'''
        if token_ids_b is None:
            return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_a + sep + sep + token_ids_b + sep
    def get_special_tokens_mask( self , token_ids_a , token_ids_b = None , already_has_special_tokens = False):
        '''simple docstring'''
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_a , token_ids_1=token_ids_b , already_has_special_tokens=True)
        if token_ids_b is None:
            return [1] + ([0] * len(token_ids_a)) + [1]
        return [1] + ([0] * len(token_ids_a)) + [1, 1] + ([0] * len(token_ids_b)) + [1]
    def create_token_type_ids_from_sequences( self , token_ids_a , token_ids_b = None):
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_b + sep) * [0]
    @property
    def vocab_size( self):
        '''simple docstring'''
        return len(self.fairseq_ids_to_tokens)
    def get_vocab( self):
        '''simple docstring'''
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def _tokenize( self , text):
        '''simple docstring'''
        return self.sp_model.encode(text , out_type=str)
    def _convert_token_to_id( self , token):
        '''simple docstring'''
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        else:
            return self.unk_token_id
    def _convert_id_to_token( self , index):
        '''simple docstring'''
        return self.fairseq_ids_to_tokens[index]
    def convert_tokens_to_string( self , tokens):
        '''simple docstring'''
        out_string = ''.join(tokens).replace(SPIECE_UNDERLINE , ' ').strip()
        return out_string
    def save_vocabulary( self , save_directory , filename_prefix = None):
        '''simple docstring'''
        if not os.path.isdir(save_directory):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""")
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
        out_monolingual_vocab_file = os.path.join(
            save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['monolingual_vocab_file'] , )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file , out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file , 'wb') as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        if os.path.abspath(self.monolingual_vocab_file) != os.path.abspath(
            out_monolingual_vocab_file) and os.path.isfile(self.monolingual_vocab_file):
            copyfile(self.monolingual_vocab_file , out_monolingual_vocab_file)
        elif not os.path.isfile(self.monolingual_vocab_file):
            with open(out_monolingual_vocab_file , 'w' , encoding='utf-8') as fp:
                for token in self.fairseq_tokens_to_ids:
                    if token not in self.all_special_tokens:
                        fp.write(f"""{str(token)} \n""")
        return out_vocab_file, out_monolingual_vocab_file
| 474 |
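A hedged usage sketch for the tokenizer above (downloads the public checkpoint; the sample sentence is Vietnamese for "We are researchers"):

from transformers import BartphoTokenizer

tokenizer = BartphoTokenizer.from_pretrained("vinai/bartpho-syllable")
ids = tokenizer("Chúng tôi là những nghiên cứu viên.")["input_ids"]
print(tokenizer.decode(ids))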
"""simple docstring"""
from typing import Dict
from transformers import EvalPrediction, HfArgumentParser, TrainingArguments, is_torch_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
get_torch_dist_unique_port,
require_torch_multi_gpu,
require_torch_neuroncore,
)
from transformers.training_args import ParallelMode
from transformers.utils import logging
logger = logging.get_logger(__name__)
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
from transformers import Trainer
class DummyDataset(Dataset ):
    """simple docstring"""
    def __init__( self , length = 1_0_1) -> Tuple:
        self.length = length
    def __len__( self) -> List[str]:
        return self.length
    def __getitem__( self , i) -> int:
        return i
class DummyDataCollator:
    """simple docstring"""
    def __call__( self , features) -> str:
        return {"input_ids": torch.tensor(features), "labels": torch.tensor(features)}
class DummyModel(nn.Module ):
    """simple docstring"""
    def __init__( self) -> List[Any]:
        super().__init__()
        # Add some (unused) params otherwise DDP will complain.
        self.fc = nn.Linear(1_2_0 , 8_0)
    def forward( self , input_ids , labels=None) -> Any:
        if labels is not None:
            return torch.tensor(0.0 , device=input_ids.device), input_ids
        else:
            return input_ids
class TestTrainerDistributedNeuronCore(TestCasePlus ):
    """simple docstring"""
    @require_torch_neuroncore
    def test_trainer( self) -> Tuple:
        distributed_args = F'--nproc_per_node=2\n        --master_port={get_torch_dist_unique_port()}\n        {self.test_file_dir}/test_trainer_distributed.py\n        '.split()
        output_dir = self.get_auto_remove_tmp_dir()
        args = F'--output_dir {output_dir}'.split()
        cmd = ['''torchrun'''] + distributed_args + args
        execute_subprocess_async(cmd , env=self.get_env())
        # successful return here == success - any errors would have caused an error in the sub-call
class TestTrainerDistributed(TestCasePlus ):
    """simple docstring"""
    @require_torch_multi_gpu
    def test_trainer( self) -> Union[str, Any]:
        distributed_args = F'--nproc_per_node={torch.cuda.device_count()}\n        --master_port={get_torch_dist_unique_port()}\n        {self.test_file_dir}/test_trainer_distributed.py\n        '.split()
        output_dir = self.get_auto_remove_tmp_dir()
        args = F'--output_dir {output_dir}'.split()
        cmd = ['''torchrun'''] + distributed_args + args
        execute_subprocess_async(cmd , env=self.get_env())
        # successful return here == success - any errors would have caused an error in the sub-call
if __name__ == "__main__":
# The script below is meant to be run under torch.distributed, on a machine with multiple GPUs:
#
# PYTHONPATH="src" python -m torch.distributed.run --nproc_per_node 2 --output_dir output_dir ./tests/test_trainer_distributed.py
    parser = HfArgumentParser((TrainingArguments,))
    training_args = parser.parse_args_into_dataclasses()[0]
    logger.warning(
        f'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, '
        f'distributed training: {training_args.parallel_mode != ParallelMode.NOT_DISTRIBUTED}'
    )
    # Essentially, what we want to verify in the distributed case is that we get all samples back,
    # in the right order. (this is crucial for prediction for instance)
    for dataset_length in [101, 40, 7]:
        dataset = DummyDataset(dataset_length)
        def compute_metrics(p):
            """simple docstring"""
            sequential = list(range(len(dataset ) ) )
            success = p.predictions.tolist() == sequential and p.label_ids.tolist() == sequential
            if not success and training_args.local_rank == 0:
                logger.warning(
                    '''Predictions and/or labels do not match expected results:\n  - predictions: '''
                    f'{p.predictions.tolist()}\n  - labels: {p.label_ids.tolist()}\n  - expected: {sequential}' )
            return {"success": success}
        trainer = Trainer(
            model=DummyModel(),
            args=training_args,
            data_collator=DummyDataCollator(),
            eval_dataset=dataset,
            compute_metrics=compute_metrics,
        )
        metrics = trainer.evaluate()
        logger.info(metrics)
        if metrics["eval_success"] is not True:
            logger.error(metrics)
            exit(1)
        p = trainer.predict(dataset)
        logger.info(p.metrics)
        if p.metrics["test_success"] is not True:
            logger.error(p.metrics)
            exit(1)
        trainer.args.eval_accumulation_steps = 2
        metrics = trainer.evaluate()
        logger.info(metrics)
        if metrics["eval_success"] is not True:
            logger.error(metrics)
            exit(1)
        p = trainer.predict(dataset)
        logger.info(p.metrics)
        if p.metrics["test_success"] is not True:
            logger.error(p.metrics)
            exit(1)
        trainer.args.eval_accumulation_steps = None | 34 | 0 |
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"""vocab_file""": """spiece.model"""}
PRETRAINED_VOCAB_FILES_MAP = {
    """vocab_file""": {
        """xlnet-base-cased""": """https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model""",
        """xlnet-large-cased""": """https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model""",
    }
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    """xlnet-base-cased""": None,
    """xlnet-large-cased""": None,
}
# Segments (not really needed)
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4
class XLNetTokenizer( PreTrainedTokenizer ):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = "left"
    def __init__( self, vocab_file, do_lower_case=False, remove_space=True, keep_accents=False, bos_token="<s>", eos_token="</s>", unk_token="<unk>", sep_token="<sep>", pad_token="<pad>", cls_token="<cls>", mask_token="<mask>", additional_special_tokens=["<eop>", "<eod>"], sp_model_kwargs = None, **kwargs, ):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False ) if isinstance(mask_token, str ) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, additional_special_tokens=additional_special_tokens, sp_model_kwargs=self.sp_model_kwargs, **kwargs, )
        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(vocab_file )
    @property
    def vocab_size( self ):
        return len(self.sp_model )
    def get_vocab( self ):
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def __getstate__( self ):
        state = self.__dict__.copy()
        state["""sp_model"""] = None
        return state
    def __setstate__( self, d ):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, """sp_model_kwargs""" ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )
def _lowerCAmelCase ( self, lowerCamelCase__ ):
if self.remove_space:
A : Optional[Any] = """ """.join(inputs.strip().split() )
else:
A : List[Any] = inputs
A : Union[str, Any] = outputs.replace("""``""", """\"""" ).replace("""\'\'""", """\"""" )
if not self.keep_accents:
A : int = unicodedata.normalize("""NFKD""", lowerCamelCase_ )
A : Dict = """""".join([c for c in outputs if not unicodedata.combining(lowerCamelCase_ )] )
if self.do_lower_case:
A : Optional[Any] = outputs.lower()
return outputs
def _lowerCAmelCase ( self, lowerCamelCase__ ):
A : List[Any] = self.preprocess_text(lowerCamelCase_ )
A : int = self.sp_model.encode(lowerCamelCase_, out_type=lowerCamelCase_ )
A : Dict = []
for piece in pieces:
if len(piece ) > 1 and piece[-1] == str(""",""" ) and piece[-2].isdigit():
A : Optional[int] = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, """""" ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
A : Any = cur_pieces[1:]
else:
A : Any = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(lowerCamelCase_ )
else:
new_pieces.append(lowerCamelCase_ )
return new_pieces
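# The loop above re-tokenizes pieces that end in "<digit>," (e.g. "9,") so the
# trailing comma becomes its own piece, mirroring the original XLNet tokenizer.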
def _lowerCAmelCase ( self, lowerCamelCase__ ):
return self.sp_model.PieceToId(lowerCamelCase_ )
def _lowerCAmelCase ( self, lowerCamelCase__ ):
return self.sp_model.IdToPiece(lowerCamelCase_ )
def _lowerCAmelCase ( self, lowerCamelCase__ ):
A : int = """""".join(lowerCamelCase_ ).replace(lowerCamelCase_, """ """ ).strip()
return out_string
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__ = False, lowerCamelCase__ = None, lowerCamelCase__ = True, **lowerCamelCase__, ):
A : List[str] = kwargs.pop("""use_source_tokenizer""", lowerCamelCase_ )
A : Dict = self.convert_ids_to_tokens(lowerCamelCase_, skip_special_tokens=lowerCamelCase_ )
# To avoid mixing byte-level and unicode for byte-level BPT
# we need to build string separately for added tokens and byte-level tokens
# cf. https://github.com/huggingface/transformers/issues/1133
A : List[Any] = []
A : Optional[Any] = []
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_ids:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(lowerCamelCase_ ) )
A : Optional[Any] = []
sub_texts.append(lowerCamelCase_ )
else:
current_sub_text.append(lowerCamelCase_ )
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(lowerCamelCase_ ) )
# Mimic the behavior of the Rust tokenizer:
# By default, there are no spaces between special tokens
A : List[str] = """""".join(lowerCamelCase_ )
A : List[str] = (
clean_up_tokenization_spaces
if clean_up_tokenization_spaces is not None
else self.clean_up_tokenization_spaces
)
if clean_up_tokenization_spaces:
A : List[Any] = self.clean_up_tokenization(lowerCamelCase_ )
return clean_text
else:
return text
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__ = None ):
A : Optional[Any] = [self.sep_token_id]
A : List[str] = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__ = None, lowerCamelCase__ = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCamelCase_, token_ids_a=lowerCamelCase_, already_has_special_tokens=lowerCamelCase_ )
if token_ids_a is not None:
return ([0] * len(lowerCamelCase_ )) + [1] + ([0] * len(lowerCamelCase_ )) + [1, 1]
return ([0] * len(lowerCamelCase_ )) + [1, 1]
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__ = None ):
A : int = [self.sep_token_id]
A : Union[str, Any] = [2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__ = None ):
if not os.path.isdir(lowerCamelCase_ ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
A : int = os.path.join(
lowerCamelCase_, (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCamelCase_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file, lowerCamelCase_ )
elif not os.path.isfile(self.vocab_file ):
with open(lowerCamelCase_, """wb""" ) as fi:
A : Tuple = self.sp_model.serialized_model_proto()
fi.write(lowerCamelCase_ )
return (out_vocab_file,)
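# Minimal usage sketch (assuming the class above corresponds to the original
# XLNetTokenizer and that a local `spiece.model` file exists). Unlike BERT,
# the special tokens go at the *end*: A + <sep> [+ B + <sep>] + <cls>.
#
#   tokenizer = XLNetTokenizer("spiece.model")
#   ids = tokenizer.build_inputs_with_special_tokens([10, 11], [12, 13])
#   # -> [10, 11, sep_id, 12, 13, sep_id, cls_id]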
| 662 |
"""simple docstring"""
import argparse
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration
SCREAMING_SNAKE_CASE_ = [
# tf -> hf
('/', '.'),
('layer_', 'layers.'),
('kernel', 'weight'),
('beta', 'bias'),
('gamma', 'weight'),
('pegasus', 'model'),
]
SCREAMING_SNAKE_CASE_ = [
('.output.dense', '.fc2'),
('intermediate.LayerNorm', 'final_layer_norm'),
('intermediate.dense', 'fc1'),
]
SCREAMING_SNAKE_CASE_ = (
INIT_COMMON
+ [
('attention.self.LayerNorm', 'self_attn_layer_norm'),
('attention.output.dense', 'self_attn.out_proj'),
('attention.self', 'self_attn'),
('attention.encdec.LayerNorm', 'encoder_attn_layer_norm'),
('attention.encdec_output.dense', 'encoder_attn.out_proj'),
('attention.encdec', 'encoder_attn'),
('key', 'k_proj'),
('value', 'v_proj'),
('query', 'q_proj'),
('decoder.LayerNorm', 'decoder.layernorm_embedding'),
]
+ END_COMMON
)
SCREAMING_SNAKE_CASE_ = (
INIT_COMMON
+ [
('embeddings.word_embeddings', 'shared.weight'),
('embeddings.position_embeddings', 'embed_positions.weight'),
('attention.self.LayerNorm', 'self_attn_layer_norm'),
('attention.output.dense', 'self_attn.output'),
('attention.self', 'self_attn.self'),
('encoder.LayerNorm', 'encoder.layernorm_embedding'),
]
+ END_COMMON
)
SCREAMING_SNAKE_CASE_ = [
'encdec/key/bias',
'encdec/query/bias',
'encdec/value/bias',
'self/key/bias',
'self/query/bias',
'self/value/bias',
'encdec_output/dense/bias',
'attention/output/dense/bias',
]
def __snake_case ( _lowercase ,_lowercase ):
"""simple docstring"""
for tf_name, hf_name in patterns:
UpperCamelCase = k.replace(tf_name ,hf_name )
return k
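# Worked example of the renaming loop above, using the decoder pattern list
# from this file on a hypothetical TF key:
#
#   k = "pegasus/decoder/layer_0/attention/self/query/kernel"
#   for tf_name, hf_name in DECODER_PATTERNS:
#       k = k.replace(tf_name, hf_name)
#   # k is now "model.decoder.layers.0.self_attn.q_proj.weight"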
def __snake_case ( _lowercase ,_lowercase ):
"""simple docstring"""
UpperCamelCase = BigBirdPegasusConfig(**_lowercase )
UpperCamelCase = BigBirdPegasusForConditionalGeneration(_lowercase )
UpperCamelCase = torch_model.state_dict()
UpperCamelCase = {}
# separating decoder weights
UpperCamelCase = {k: tf_weights[k] for k in tf_weights if k.startswith('''pegasus/decoder''' )}
UpperCamelCase = {k: tf_weights[k] for k in tf_weights if not k.startswith('''pegasus/decoder''' )}
for k, v in tqdm(decoder_weights.items() ,'''tf -> hf conversion''' ):
UpperCamelCase = [k.endswith(ending ) for ending in KEYS_TO_IGNORE]
if any(_lowercase ):
continue
UpperCamelCase = DECODER_PATTERNS
UpperCamelCase = rename_state_dict_key(_lowercase ,_lowercase )
if new_k not in state_dict:
raise ValueError(f'could not find new key {new_k} in state dict. (converted from {k})' )
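# TF stores dense kernels as (in_features, out_features) while torch.nn.Linear
# expects (out_features, in_features), hence the transpose below for
# dense/query/key/value weights.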
if any(i in k for i in ['''dense''', '''query''', '''key''', '''value'''] ):
UpperCamelCase = v.T
UpperCamelCase = torch.from_numpy(_lowercase )
assert v.shape == state_dict[new_k].shape, f'{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}'
for k, v in tqdm(remaining_weights.items() ,'''tf -> hf conversion''' ):
UpperCamelCase = [k.endswith(ending ) for ending in KEYS_TO_IGNORE]
if any(_lowercase ):
continue
UpperCamelCase = REMAINING_PATTERNS
UpperCamelCase = rename_state_dict_key(_lowercase ,_lowercase )
if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings":
raise ValueError(f'could not find new key {new_k} in state dict. (converted from {k})' )
if any(i in k for i in ['''dense''', '''query''', '''key''', '''value'''] ):
UpperCamelCase = v.T
UpperCamelCase = torch.from_numpy(_lowercase )
if k != "pegasus/embeddings/position_embeddings":
assert v.shape == state_dict[new_k].shape, f'{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}'
UpperCamelCase = mapping['''model.embed_positions.weight''']
UpperCamelCase = mapping.pop('''model.embed_positions.weight''' )
UpperCamelCase , UpperCamelCase = torch_model.load_state_dict(_lowercase ,strict=_lowercase )
UpperCamelCase = [
k
for k in missing
if k
not in [
'''final_logits_bias''',
'''model.encoder.embed_tokens.weight''',
'''model.decoder.embed_tokens.weight''',
'''lm_head.weight''',
]
]
assert unexpected_missing == [], f'no matches found for the following torch keys {unexpected_missing}'
assert extra == [], f'no matches found for the following tf keys {extra}'
return torch_model
def __snake_case ( _lowercase ):
"""simple docstring"""
UpperCamelCase = tf.train.list_variables(_lowercase )
UpperCamelCase = {}
UpperCamelCase = ['''global_step''']
for name, shape in tqdm(_lowercase ,desc='''converting tf checkpoint to dict''' ):
UpperCamelCase = any(pat in name for pat in ignore_name )
if skip_key:
continue
UpperCamelCase = tf.train.load_variable(_lowercase ,_lowercase )
UpperCamelCase = array
return tf_weights
def __snake_case ( _lowercase ,_lowercase ,_lowercase ):
"""simple docstring"""
UpperCamelCase = get_tf_weights_as_numpy(_lowercase )
UpperCamelCase = convert_bigbird_pegasus(_lowercase ,_lowercase )
torch_model.save_pretrained(_lowercase )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ = argparse.ArgumentParser()
parser.add_argument('--tf_ckpt_path', type=str, help='passed to tf.train.list_variables')
parser.add_argument('--save_dir', default=None, type=str, help='Path to the output PyTorch model.')
SCREAMING_SNAKE_CASE_ = parser.parse_args()
SCREAMING_SNAKE_CASE_ = {}
convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update) | 34 | 0 |
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block
@dataclass
class _a ( lowerCamelCase_ ):
"""simple docstring"""
A_ = 42
class _a ( nn.Module ):
"""simple docstring"""
def __init__( self , _UpperCAmelCase=3 , _UpperCAmelCase=3 , _UpperCAmelCase=("DownEncoderBlock2D",) , _UpperCAmelCase=(64,) , _UpperCAmelCase=2 , _UpperCAmelCase=32 , _UpperCAmelCase="silu" , _UpperCAmelCase=True , ) -> Tuple:
super().__init__()
UpperCamelCase_ = layers_per_block
UpperCamelCase_ = torch.nn.Convad(
lowerCamelCase_ , block_out_channels[0] , kernel_size=3 , stride=1 , padding=1 , )
UpperCamelCase_ = None
UpperCamelCase_ = nn.ModuleList([] )
# down
UpperCamelCase_ = block_out_channels[0]
for i, down_block_type in enumerate(lowerCamelCase_ ):
UpperCamelCase_ = output_channel
UpperCamelCase_ = block_out_channels[i]
UpperCamelCase_ = i == len(lowerCamelCase_ ) - 1
UpperCamelCase_ = get_down_block(
lowerCamelCase_ , num_layers=self.layers_per_block , in_channels=lowerCamelCase_ , out_channels=lowerCamelCase_ , add_downsample=not is_final_block , resnet_eps=1e-6 , downsample_padding=0 , resnet_act_fn=lowerCamelCase_ , resnet_groups=lowerCamelCase_ , attention_head_dim=lowerCamelCase_ , temb_channels=lowerCamelCase_ , )
self.down_blocks.append(lowerCamelCase_ )
# mid
UpperCamelCase_ = UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=lowerCamelCase_ , output_scale_factor=1 , resnet_time_scale_shift='default' , attention_head_dim=block_out_channels[-1] , resnet_groups=lowerCamelCase_ , temb_channels=lowerCamelCase_ , )
# out
UpperCamelCase_ = nn.GroupNorm(num_channels=block_out_channels[-1] , num_groups=lowerCamelCase_ , eps=1e-6 )
UpperCamelCase_ = nn.SiLU()
UpperCamelCase_ = 2 * out_channels if double_z else out_channels
UpperCamelCase_ = nn.Convad(block_out_channels[-1] , lowerCamelCase_ , 3 , padding=1 )
UpperCamelCase_ = False
def _UpperCAmelCase ( self , _UpperCAmelCase ) -> Any:
UpperCamelCase_ = x
UpperCamelCase_ = self.conv_in(lowerCamelCase_ )
if self.training and self.gradient_checkpointing:
def create_custom_forward(_UpperCAmelCase ):
def custom_forward(*_UpperCAmelCase ):
return module(*lowerCamelCase_ )
return custom_forward
# down
if is_torch_version('>=' , '1.11.0' ):
for down_block in self.down_blocks:
UpperCamelCase_ = torch.utils.checkpoint.checkpoint(
create_custom_forward(lowerCamelCase_ ) , lowerCamelCase_ , use_reentrant=lowerCamelCase_ )
# middle
UpperCamelCase_ = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , lowerCamelCase_ , use_reentrant=lowerCamelCase_ )
else:
for down_block in self.down_blocks:
UpperCamelCase_ = torch.utils.checkpoint.checkpoint(create_custom_forward(lowerCamelCase_ ) , lowerCamelCase_ )
# middle
UpperCamelCase_ = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ) , lowerCamelCase_ )
else:
# down
for down_block in self.down_blocks:
UpperCamelCase_ = down_block(lowerCamelCase_ )
# middle
UpperCamelCase_ = self.mid_block(lowerCamelCase_ )
# post-process
UpperCamelCase_ = self.conv_norm_out(lowerCamelCase_ )
UpperCamelCase_ = self.conv_act(lowerCamelCase_ )
UpperCamelCase_ = self.conv_out(lowerCamelCase_ )
return sample
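# Shape sketch for the encoder above (default arguments): an input of shape
# (B, in_channels, H, W) is roughly halved spatially by each non-final down
# block, and with double_z=True the final conv emits 2 * out_channels channels
# so the result can later be split into a mean/logvar pair.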
class _a ( nn.Module ):
"""simple docstring"""
def __init__( self , _UpperCAmelCase=3 , _UpperCAmelCase=3 , _UpperCAmelCase=("UpDecoderBlock2D",) , _UpperCAmelCase=(64,) , _UpperCAmelCase=2 , _UpperCAmelCase=32 , _UpperCAmelCase="silu" , _UpperCAmelCase="group" , ) -> Optional[int]:
super().__init__()
UpperCamelCase_ = layers_per_block
UpperCamelCase_ = nn.Convad(
lowerCamelCase_ , block_out_channels[-1] , kernel_size=3 , stride=1 , padding=1 , )
UpperCamelCase_ = None
UpperCamelCase_ = nn.ModuleList([] )
UpperCamelCase_ = in_channels if norm_type == 'spatial' else None
# mid
UpperCamelCase_ = UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=lowerCamelCase_ , output_scale_factor=1 , resnet_time_scale_shift='default' if norm_type == 'group' else norm_type , attention_head_dim=block_out_channels[-1] , resnet_groups=lowerCamelCase_ , temb_channels=lowerCamelCase_ , )
# up
UpperCamelCase_ = list(reversed(lowerCamelCase_ ) )
UpperCamelCase_ = reversed_block_out_channels[0]
for i, up_block_type in enumerate(lowerCamelCase_ ):
UpperCamelCase_ = output_channel
UpperCamelCase_ = reversed_block_out_channels[i]
UpperCamelCase_ = i == len(lowerCamelCase_ ) - 1
UpperCamelCase_ = get_up_block(
lowerCamelCase_ , num_layers=self.layers_per_block + 1 , in_channels=lowerCamelCase_ , out_channels=lowerCamelCase_ , prev_output_channel=lowerCamelCase_ , add_upsample=not is_final_block , resnet_eps=1e-6 , resnet_act_fn=lowerCamelCase_ , resnet_groups=lowerCamelCase_ , attention_head_dim=lowerCamelCase_ , temb_channels=lowerCamelCase_ , resnet_time_scale_shift=lowerCamelCase_ , )
self.up_blocks.append(lowerCamelCase_ )
UpperCamelCase_ = output_channel
# out
if norm_type == "spatial":
UpperCamelCase_ = SpatialNorm(block_out_channels[0] , lowerCamelCase_ )
else:
UpperCamelCase_ = nn.GroupNorm(num_channels=block_out_channels[0] , num_groups=lowerCamelCase_ , eps=1e-6 )
UpperCamelCase_ = nn.SiLU()
UpperCamelCase_ = nn.Convad(block_out_channels[0] , lowerCamelCase_ , 3 , padding=1 )
UpperCamelCase_ = False
def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase=None ) -> str:
UpperCamelCase_ = z
UpperCamelCase_ = self.conv_in(lowerCamelCase_ )
UpperCamelCase_ = next(iter(self.up_blocks.parameters() ) ).dtype
if self.training and self.gradient_checkpointing:
def create_custom_forward(_UpperCAmelCase ):
def custom_forward(*_UpperCAmelCase ):
return module(*lowerCamelCase_ )
return custom_forward
if is_torch_version('>=' , '1.11.0' ):
# middle
UpperCamelCase_ = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , lowerCamelCase_ , lowerCamelCase_ , use_reentrant=lowerCamelCase_ )
UpperCamelCase_ = sample.to(lowerCamelCase_ )
# up
for up_block in self.up_blocks:
UpperCamelCase_ = torch.utils.checkpoint.checkpoint(
create_custom_forward(lowerCamelCase_ ) , lowerCamelCase_ , lowerCamelCase_ , use_reentrant=lowerCamelCase_ )
else:
# middle
UpperCamelCase_ = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , lowerCamelCase_ , lowerCamelCase_ )
UpperCamelCase_ = sample.to(lowerCamelCase_ )
# up
for up_block in self.up_blocks:
UpperCamelCase_ = torch.utils.checkpoint.checkpoint(create_custom_forward(lowerCamelCase_ ) , lowerCamelCase_ , lowerCamelCase_ )
else:
# middle
UpperCamelCase_ = self.mid_block(lowerCamelCase_ , lowerCamelCase_ )
UpperCamelCase_ = sample.to(lowerCamelCase_ )
# up
for up_block in self.up_blocks:
UpperCamelCase_ = up_block(lowerCamelCase_ , lowerCamelCase_ )
# post-process
if latent_embeds is None:
UpperCamelCase_ = self.conv_norm_out(lowerCamelCase_ )
else:
UpperCamelCase_ = self.conv_norm_out(lowerCamelCase_ , lowerCamelCase_ )
UpperCamelCase_ = self.conv_act(lowerCamelCase_ )
UpperCamelCase_ = self.conv_out(lowerCamelCase_ )
return sample
class _a ( nn.Module ):
"""simple docstring"""
def __init__( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=None , _UpperCAmelCase="random" , _UpperCAmelCase=False , _UpperCAmelCase=True ) -> Any:
super().__init__()
UpperCamelCase_ = n_e
UpperCamelCase_ = vq_embed_dim
UpperCamelCase_ = beta
UpperCamelCase_ = legacy
UpperCamelCase_ = nn.Embedding(self.n_e , self.vq_embed_dim )
self.embedding.weight.data.uniform_(-1.0 / self.n_e , 1.0 / self.n_e )
UpperCamelCase_ = remap
if self.remap is not None:
self.register_buffer('used' , torch.tensor(np.load(self.remap ) ) )
UpperCamelCase_ = self.used.shape[0]
UpperCamelCase_ = unknown_index # "random" or "extra" or integer
if self.unknown_index == "extra":
UpperCamelCase_ = self.re_embed
UpperCamelCase_ = self.re_embed + 1
print(
f"""Remapping {self.n_e} indices to {self.re_embed} indices. """
f"""Using {self.unknown_index} for unknown indices.""" )
else:
UpperCamelCase_ = n_e
UpperCamelCase_ = sane_index_shape
def _UpperCAmelCase ( self , _UpperCAmelCase ) -> List[str]:
UpperCamelCase_ = inds.shape
assert len(lowerCamelCase_ ) > 1
UpperCamelCase_ = inds.reshape(ishape[0] , -1 )
UpperCamelCase_ = self.used.to(lowerCamelCase_ )
UpperCamelCase_ = (inds[:, :, None] == used[None, None, ...]).long()
UpperCamelCase_ = match.argmax(-1 )
UpperCamelCase_ = match.sum(2 ) < 1
if self.unknown_index == "random":
UpperCamelCase_ = torch.randint(0 , self.re_embed , size=new[unknown].shape ).to(device=new.device )
else:
UpperCamelCase_ = self.unknown_index
return new.reshape(lowerCamelCase_ )
def _UpperCAmelCase ( self , _UpperCAmelCase ) -> int:
UpperCamelCase_ = inds.shape
assert len(lowerCamelCase_ ) > 1
UpperCamelCase_ = inds.reshape(ishape[0] , -1 )
UpperCamelCase_ = self.used.to(lowerCamelCase_ )
if self.re_embed > self.used.shape[0]: # extra token
UpperCamelCase_ = 0 # simply set to zero
UpperCamelCase_ = torch.gather(used[None, :][inds.shape[0] * [0], :] , 1 , lowerCamelCase_ )
return back.reshape(lowerCamelCase_ )
def _UpperCAmelCase ( self , _UpperCAmelCase ) -> List[str]:
# reshape z -> (batch, height, width, channel) and flatten
UpperCamelCase_ = z.permute(0 , 2 , 3 , 1 ).contiguous()
UpperCamelCase_ = z.view(-1 , self.vq_embed_dim )
# distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
UpperCamelCase_ = torch.argmin(torch.cdist(lowerCamelCase_ , self.embedding.weight ) , dim=1 )
UpperCamelCase_ = self.embedding(lowerCamelCase_ ).view(z.shape )
UpperCamelCase_ = None
UpperCamelCase_ = None
# compute loss for embedding
if not self.legacy:
UpperCamelCase_ = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 )
else:
UpperCamelCase_ = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 )
# preserve gradients
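# (straight-through estimator: the forward pass uses the quantized z_q, while
# gradients flow back to the encoder output z unchanged)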
UpperCamelCase_ = z + (z_q - z).detach()
# reshape back to match original input shape
UpperCamelCase_ = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
if self.remap is not None:
UpperCamelCase_ = min_encoding_indices.reshape(z.shape[0] , -1 ) # add batch axis
UpperCamelCase_ = self.remap_to_used(lowerCamelCase_ )
UpperCamelCase_ = min_encoding_indices.reshape(-1 , 1 ) # flatten
if self.sane_index_shape:
UpperCamelCase_ = min_encoding_indices.reshape(z_q.shape[0] , z_q.shape[2] , z_q.shape[3] )
return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase ) -> List[str]:
# shape specifying (batch, height, width, channel)
if self.remap is not None:
UpperCamelCase_ = indices.reshape(shape[0] , -1 ) # add batch axis
UpperCamelCase_ = self.unmap_to_all(lowerCamelCase_ )
UpperCamelCase_ = indices.reshape(-1 ) # flatten again
# get quantized latent vectors
UpperCamelCase_ = self.embedding(lowerCamelCase_ )
if shape is not None:
UpperCamelCase_ = z_q.view(lowerCamelCase_ )
# reshape back to match original input shape
UpperCamelCase_ = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
return z_q
class _a ( lowerCamelCase_ ):
"""simple docstring"""
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=False ) -> str:
UpperCamelCase_ = parameters
UpperCamelCase_ , UpperCamelCase_ = torch.chunk(lowerCamelCase_ , 2 , dim=1 )
UpperCamelCase_ = torch.clamp(self.logvar , -3_0.0 , 2_0.0 )
UpperCamelCase_ = deterministic
UpperCamelCase_ = torch.exp(0.5 * self.logvar )
UpperCamelCase_ = torch.exp(self.logvar )
if self.deterministic:
UpperCamelCase_ = UpperCamelCase_ = torch.zeros_like(
self.mean , device=self.parameters.device , dtype=self.parameters.dtype )
def _UpperCAmelCase ( self , _UpperCAmelCase = None ) -> torch.FloatTensor:
# make sure sample is on the same device as the parameters and has same dtype
UpperCamelCase_ = randn_tensor(
self.mean.shape , generator=lowerCamelCase_ , device=self.parameters.device , dtype=self.parameters.dtype )
UpperCamelCase_ = self.mean + self.std * sample
return x
def _UpperCAmelCase ( self , _UpperCAmelCase=None ) -> int:
if self.deterministic:
return torch.Tensor([0.0] )
else:
if other is None:
return 0.5 * torch.sum(torch.pow(self.mean , 2 ) + self.var - 1.0 - self.logvar , dim=[1, 2, 3] )
else:
return 0.5 * torch.sum(
torch.pow(self.mean - other.mean , 2 ) / other.var
+ self.var / other.var
- 1.0
- self.logvar
+ other.logvar , dim=[1, 2, 3] , )
def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase=[1, 2, 3] ) -> Optional[int]:
if self.deterministic:
return torch.Tensor([0.0] )
UpperCamelCase_ = np.log(2.0 * np.pi )
return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean , 2 ) / self.var , dim=lowerCamelCase_ )
def _UpperCAmelCase ( self ) -> str:
return self.mean
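# Minimal usage sketch (assuming the class above corresponds to diffusers'
# DiagonalGaussianDistribution): the encoder output is split channel-wise into
# mean and logvar, and `sample()` applies the reparameterization trick
# x = mean + std * eps so the sampling step stays differentiable.
#
#   params = torch.randn(1, 8, 4, 4)            # 2 * latent_channels
#   dist = DiagonalGaussianDistribution(params)
#   z = dist.sample()                            # shape (1, 4, 4, 4)
#   kl = dist.kl()                               # KL divergence vs. N(0, I)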
| 23 |
"""simple docstring"""
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
def __snake_case ( _lowercase ):
"""simple docstring"""
UpperCamelCase , UpperCamelCase = analyze_text(_lowercase )
UpperCamelCase = list(''' ''' + ascii_lowercase )
# what is our total sum of probabilities.
UpperCamelCase = sum(single_char_strings.values() )
# one length string
UpperCamelCase = 0
# for each alpha we go in our dict and if it is in it we calculate entropy
for ch in my_alphas:
if ch in single_char_strings:
UpperCamelCase = single_char_strings[ch]
UpperCamelCase = my_str / all_sum
my_fir_sum += prob * math.loga(prob ) # entropy formula.
# print entropy
print(f'{round(-1 * my_fir_sum ):.1f}' )
# two len string
UpperCamelCase = sum(two_char_strings.values() )
UpperCamelCase = 0
# for each alpha (two in size) calculate entropy.
for cha in my_alphas:
for chb in my_alphas:
UpperCamelCase = cha + chb
if sequence in two_char_strings:
UpperCamelCase = two_char_strings[sequence]
UpperCamelCase = int(my_str ) / all_sum
my_sec_sum += prob * math.loga(prob )
# print second entropy
print(f'{round(-1 * my_sec_sum ):.1f}' )
# print the difference between them
print(f'{round((-1 * my_sec_sum) - (-1 * my_fir_sum) ):.1f}' )
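# Worked example of the first-order estimate above: for the text "abab", 'a'
# and 'b' each occur with probability 1/2, so
#   H1 = -(0.5*log2(0.5) + 0.5*log2(0.5)) = 1.0 bit per symbol.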
def __snake_case ( _lowercase ):
"""simple docstring"""
UpperCamelCase = Counter() # type: ignore
UpperCamelCase = Counter() # type: ignore
single_char_strings[text[-1]] += 1
# first case when we have space at start.
two_char_strings[" " + text[0]] += 1
for i in range(0 ,len(_lowercase ) - 1 ):
single_char_strings[text[i]] += 1
two_char_strings[text[i : i + 2]] += 1
return single_char_strings, two_char_strings
def __snake_case ( ):
"""simple docstring"""
import doctest
doctest.testmod()
# text = (
# "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
# "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
# "jointure saw horrible. He private he on be imagine suppose. Fertile "
# "beloved evident through no service elderly is. Blind there if every no so "
# "at. Own neglected you preferred way sincerity delivered his attempted. To "
# "of message cottage windows do besides against uncivil. Delightful "
# "unreserved impossible few estimating men favourable see entreaties. She "
# "propriety immediate was improving. He or entrance humoured likewise "
# "moderate. Much nor game son say feel. Fat make met can must form into "
# "gate. Me we offending prevailed discovery. "
# )
# calculate_prob(text)
if __name__ == "__main__":
main() | 34 | 0 |
import darl # noqa
import gym
import tqdm
from diffusers.experimental import ValueGuidedRLPipeline
lowercase : Optional[Any] = {
'n_samples': 64,
'horizon': 32,
'num_inference_steps': 20,
'n_guide_steps': 2, # can set to 0 for faster sampling, does not use value network
'scale_grad_by_std': True,
'scale': 0.1,
'eta': 0.0,
't_grad_cutoff': 2,
'device': 'cpu',
}
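# Rough meaning of the knobs above (based on diffusers' experimental
# value-guided sampler): `n_guide_steps` is how many value-gradient updates
# are applied per denoising step, `scale` is the guidance step size, and
# `t_grad_cutoff` disables guidance for the final low-noise timesteps.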
if __name__ == "__main__":
lowercase : List[str] = 'hopper-medium-v2'
lowercase : Dict = gym.make(env_name)
lowercase : List[Any] = ValueGuidedRLPipeline.from_pretrained(
'bglick13/hopper-medium-v2-value-function-hor32',
env=env,
)
env.seed(0)
lowercase : int = env.reset()
lowercase : Optional[Any] = 0
lowercase : int = 0
lowercase : str = 1000
lowercase : Tuple = [obs.copy()]
try:
for t in tqdm.tqdm(range(T)):
# call the policy
lowercase : Tuple = pipeline(obs, planning_horizon=32)
# execute action in environment
lowercase , lowercase , lowercase , lowercase : List[Any] = env.step(denorm_actions)
lowercase : List[Any] = env.get_normalized_score(total_reward)
# update return
total_reward += reward
total_score += score
print(
f"Step: {t}, Reward: {reward}, Total Reward: {total_reward}, Score: {score}, Total Score:"
f" {total_score}"
)
# save observations for rendering
rollout.append(next_observation.copy())
lowercase : List[Any] = next_observation
except KeyboardInterrupt:
pass
print(f"Total reward: {total_reward}") | 557 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import DistilBertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.distilbert.modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
)
class snake_case_ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , lowerCamelCase_ , lowerCamelCase_=1_3 , lowerCamelCase_=7 , lowerCamelCase_=True , lowerCamelCase_=True , lowerCamelCase_=True , lowerCamelCase_=True , lowerCamelCase_=9_9 , lowerCamelCase_=3_2 , lowerCamelCase_=5 , lowerCamelCase_=4 , lowerCamelCase_=3_7 , lowerCamelCase_="gelu" , lowerCamelCase_=0.1 , lowerCamelCase_=0.1 , lowerCamelCase_=5_1_2 , lowerCamelCase_=1_6 , lowerCamelCase_=2 , lowerCamelCase_=0.02 , lowerCamelCase_=4 , ) -> Any:
UpperCamelCase = parent
UpperCamelCase = batch_size
UpperCamelCase = seq_length
UpperCamelCase = is_training
UpperCamelCase = use_attention_mask
UpperCamelCase = use_token_type_ids
UpperCamelCase = use_labels
UpperCamelCase = vocab_size
UpperCamelCase = hidden_size
UpperCamelCase = num_hidden_layers
UpperCamelCase = num_attention_heads
UpperCamelCase = intermediate_size
UpperCamelCase = hidden_act
UpperCamelCase = hidden_dropout_prob
UpperCamelCase = attention_probs_dropout_prob
UpperCamelCase = max_position_embeddings
UpperCamelCase = type_vocab_size
UpperCamelCase = type_sequence_label_size
UpperCamelCase = initializer_range
UpperCamelCase = num_choices
def UpperCAmelCase__ ( self) -> Optional[Any]:
UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
UpperCamelCase = None
if self.use_attention_mask:
UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length])
UpperCamelCase = DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , tie_weights_=lowerCamelCase_ , )
return config, input_ids, attention_mask
def UpperCAmelCase__ ( self) -> str:
UpperCamelCase = self.prepare_config_and_inputs()
UpperCamelCase , UpperCamelCase , UpperCamelCase = config_and_inputs
UpperCamelCase = {'''input_ids''': input_ids, '''attention_mask''': attention_mask}
return config, inputs_dict
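# Note: the helper above returns a (config, inputs_dict) pair where input_ids
# and attention_mask have shape (batch_size, seq_length) = (13, 7), as
# configured in __init__.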
@require_flax
class snake_case_ ( lowerCamelCase_ , unittest.TestCase ):
"""simple docstring"""
A_ = (
(
FlaxDistilBertModel,
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertForQuestionAnswering,
)
if is_flax_available()
else ()
)
def UpperCAmelCase__ ( self) -> List[str]:
UpperCamelCase = FlaxDistilBertModelTester(self)
@slow
def UpperCAmelCase__ ( self) -> Dict:
for model_class_name in self.all_model_classes:
UpperCamelCase = model_class_name.from_pretrained('''distilbert-base-uncased''')
UpperCamelCase = model(np.ones((1, 1)))
self.assertIsNotNone(lowerCamelCase_)
@require_flax
class snake_case_ ( unittest.TestCase ):
"""simple docstring"""
@slow
def UpperCAmelCase__ ( self) -> Optional[int]:
UpperCamelCase = FlaxDistilBertModel.from_pretrained('''distilbert-base-uncased''')
UpperCamelCase = np.array([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]])
UpperCamelCase = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
UpperCamelCase = model(lowerCamelCase_ , attention_mask=lowerCamelCase_)[0]
UpperCamelCase = (1, 1_1, 7_6_8)
self.assertEqual(output.shape , lowerCamelCase_)
UpperCamelCase = np.array([[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]])
self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , lowerCamelCase_ , atol=1e-4)) | 34 | 0 |
import operator
def UpperCamelCase ( _A : str , _A : int = False , _A : Union[str, Any] = None )-> Union[str, Any]:
"""simple docstring"""
A__ = operator.lt if reverse else operator.gt
A__ = solution or []
if not arr:
return solution
A__ = [arr.pop(0 )]
for i, item in enumerate(_lowercase ):
if _operator(_lowercase , sublist[-1] ):
sublist.append(_lowercase )
arr.pop(_lowercase )
# merging sublist into solution list
if not solution:
solution.extend(_lowercase )
else:
while sublist:
A__ = sublist.pop(0 )
for i, xx in enumerate(_lowercase ):
if not _operator(_lowercase , _lowercase ):
solution.insert(_lowercase , _lowercase )
break
else:
solution.append(_lowercase )
strand_sort(_lowercase , _lowercase , _lowercase )
return solution
if __name__ == "__main__":
assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
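# A couple of extra smoke checks (duplicates and the empty list), written
# against the un-mangled name `strand_sort` used above.
assert strand_sort([]) == []
assert strand_sort([2, 2, 1, 3]) == [1, 2, 2, 3]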
| 491 |
"""simple docstring"""
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
@add_end_docstrings(lowerCamelCase_ )
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
def __init__( self , **lowerCamelCase_) -> Tuple:
super().__init__(**lowerCamelCase_)
requires_backends(self , '''vision''')
self.check_model_type(
TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if self.framework == '''tf'''
else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING)
def __call__( self , lowerCamelCase_ , **lowerCamelCase_) -> Optional[int]:
return super().__call__(lowerCamelCase_ , **lowerCamelCase_)
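# Illustrative usage sketch (assuming this class backs transformers'
# "zero-shot-image-classification" pipeline; the scores below are made up):
#
#   classifier = pipeline("zero-shot-image-classification",
#                         model="openai/clip-vit-base-patch32")
#   classifier("cat.png", candidate_labels=["cat", "dog"])
#   # -> [{"score": 0.98, "label": "cat"}, {"score": 0.02, "label": "dog"}]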
def UpperCAmelCase__ ( self , **lowerCamelCase_) -> Any:
UpperCamelCase = {}
if "candidate_labels" in kwargs:
UpperCamelCase = kwargs['''candidate_labels''']
if "hypothesis_template" in kwargs:
UpperCamelCase = kwargs['''hypothesis_template''']
return preprocess_params, {}, {}
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_=None , lowerCamelCase_="This is a photo of {}.") -> Union[str, Any]:
UpperCamelCase = load_image(lowerCamelCase_)
UpperCamelCase = self.image_processor(images=[image] , return_tensors=self.framework)
UpperCamelCase = candidate_labels
UpperCamelCase = [hypothesis_template.format(lowerCamelCase_) for x in candidate_labels]
UpperCamelCase = self.tokenizer(lowerCamelCase_ , return_tensors=self.framework , padding=lowerCamelCase_)
UpperCamelCase = [text_inputs]
return inputs
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Any:
UpperCamelCase = model_inputs.pop('''candidate_labels''')
UpperCamelCase = model_inputs.pop('''text_inputs''')
if isinstance(text_inputs[0] , lowerCamelCase_):
UpperCamelCase = text_inputs[0]
else:
# Batching case.
UpperCamelCase = text_inputs[0][0]
UpperCamelCase = self.model(**lowerCamelCase_ , **lowerCamelCase_)
UpperCamelCase = {
'''candidate_labels''': candidate_labels,
'''logits''': outputs.logits_per_image,
}
return model_outputs
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Any:
UpperCamelCase = model_outputs.pop('''candidate_labels''')
UpperCamelCase = model_outputs['''logits'''][0]
if self.framework == "pt":
UpperCamelCase = logits.softmax(dim=-1).squeeze(-1)
UpperCamelCase = probs.tolist()
if not isinstance(lowerCamelCase_ , lowerCamelCase_):
UpperCamelCase = [scores]
elif self.framework == "tf":
UpperCamelCase = stable_softmax(lowerCamelCase_ , axis=-1)
UpperCamelCase = probs.numpy().tolist()
else:
raise ValueError(F'Unsupported framework: {self.framework}')
UpperCamelCase = [
{'''score''': score, '''label''': candidate_label}
for score, candidate_label in sorted(zip(lowerCamelCase_ , lowerCamelCase_) , key=lambda x: -x[0])
]
return result | 34 | 0 |
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision.transforms import functional as F
from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection
from transformers.utils import logging
logging.set_verbosity_info()
A_: Optional[Any] = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
A_: int = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''', F'''encoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(F'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', F'''encoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.weight''', F'''encoder.layers.{i}.fc1.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.bias''', F'''encoder.layers.{i}.fc1.bias'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.weight''', F'''encoder.layers.{i}.fc2.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.bias''', F'''encoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(F'''transformer.encoder.layers.{i}.norm1.weight''', F'''encoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((F'''transformer.encoder.layers.{i}.norm1.bias''', F'''encoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.weight''', F'''encoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.bias''', F'''encoder.layers.{i}.final_layer_norm.bias'''))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(F'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', F'''decoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', F'''decoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.multihead_attn.out_proj.weight''',
F'''decoder.layers.{i}.encoder_attn.out_proj.weight''',
)
)
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.multihead_attn.out_proj.bias''',
F'''decoder.layers.{i}.encoder_attn.out_proj.bias''',
)
)
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.weight''', F'''decoder.layers.{i}.fc1.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.bias''', F'''decoder.layers.{i}.fc1.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.weight''', F'''decoder.layers.{i}.fc2.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.bias''', F'''decoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm1.weight''', F'''decoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.norm1.bias''', F'''decoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm2.weight''', F'''decoder.layers.{i}.encoder_attn_layer_norm.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm2.bias''', F'''decoder.layers.{i}.encoder_attn_layer_norm.bias''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.weight''', F'''decoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.bias''', F'''decoder.layers.{i}.final_layer_norm.bias'''))
# convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
('input_proj.weight', 'input_projection.weight'),
('input_proj.bias', 'input_projection.bias'),
('query_embed.weight', 'query_position_embeddings.weight'),
('transformer.encoder.norm.weight', 'encoder.layernorm.weight'),
('transformer.encoder.norm.bias', 'encoder.layernorm.bias'),
('transformer.decoder.norm.weight', 'decoder.layernorm.weight'),
('transformer.decoder.norm.bias', 'decoder.layernorm.bias'),
('class_embed.weight', 'class_labels_classifier.weight'),
('class_embed.bias', 'class_labels_classifier.bias'),
('bbox_embed.layers.0.weight', 'bbox_predictor.layers.0.weight'),
('bbox_embed.layers.0.bias', 'bbox_predictor.layers.0.bias'),
('bbox_embed.layers.1.weight', 'bbox_predictor.layers.1.weight'),
('bbox_embed.layers.1.bias', 'bbox_predictor.layers.1.bias'),
('bbox_embed.layers.2.weight', 'bbox_predictor.layers.2.weight'),
('bbox_embed.layers.2.bias', 'bbox_predictor.layers.2.bias'),
]
)
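# The helpers below apply these (src, dest) pairs one at a time; the attention
# in_proj matrices get separate handling because PyTorch packs q/k/v into a
# single (3*256, 256) matrix that must be split into 256-row chunks.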
def __lowerCAmelCase ( _A ,_A ,_A ):
"""simple docstring"""
_lowercase = state_dict.pop(_lowercase )
_lowercase = val
def __lowerCAmelCase ( _A ):
"""simple docstring"""
_lowercase = OrderedDict()
for key, value in state_dict.items():
if "backbone.0.body" in key:
_lowercase = key.replace("""backbone.0.body""" ,"""backbone.conv_encoder.model""" )
_lowercase = value
else:
_lowercase = value
return new_state_dict
def __lowerCAmelCase ( _A ):
"""simple docstring"""
_lowercase = """"""
# first: transformer encoder
for i in range(6 ):
# read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
_lowercase = state_dict.pop(f'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight''' )
_lowercase = state_dict.pop(f'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias''' )
# next, add query, keys and values (in that order) to the state dict
_lowercase = in_proj_weight[:256, :]
_lowercase = in_proj_bias[:256]
_lowercase = in_proj_weight[256:512, :]
_lowercase = in_proj_bias[256:512]
_lowercase = in_proj_weight[-256:, :]
_lowercase = in_proj_bias[-256:]
# next: transformer decoder (which is a bit more complex because it also includes cross-attention)
for i in range(6 ):
# read in weights + bias of input projection layer of self-attention
_lowercase = state_dict.pop(f'''{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight''' )
_lowercase = state_dict.pop(f'''{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias''' )
# next, add query, keys and values (in that order) to the state dict
_lowercase = in_proj_weight[:256, :]
_lowercase = in_proj_bias[:256]
_lowercase = in_proj_weight[256:512, :]
_lowercase = in_proj_bias[256:512]
_lowercase = in_proj_weight[-256:, :]
_lowercase = in_proj_bias[-256:]
# read in weights + bias of input projection layer of cross-attention
_lowercase = state_dict.pop(
f'''{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight''' )
_lowercase = state_dict.pop(f'''{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias''' )
# next, add query, keys and values (in that order) of cross-attention to the state dict
_lowercase = in_proj_weight_cross_attn[:256, :]
_lowercase = in_proj_bias_cross_attn[:256]
_lowercase = in_proj_weight_cross_attn[256:512, :]
_lowercase = in_proj_bias_cross_attn[256:512]
_lowercase = in_proj_weight_cross_attn[-256:, :]
_lowercase = in_proj_bias_cross_attn[-256:]
def __lowerCAmelCase ( _A ,_A ):
"""simple docstring"""
_lowercase , _lowercase = image.size
_lowercase = max(_lowercase ,_lowercase )
_lowercase = 800 if """detection""" in checkpoint_url else 1_000
_lowercase = target_max_size / current_max_size
_lowercase = image.resize((int(round(scale * width ) ), int(round(scale * height ) )) )
return resized_image
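# Worked example of the resize rule above: a 2000x1000 detection input has
# current_max_size=2000 and target 800, so scale=0.4 and the image is resized
# to 800x400.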
def __lowerCAmelCase ( _A ):
"""simple docstring"""
_lowercase = F.to_tensor(_lowercase )
_lowercase = F.normalize(_lowercase ,mean=[0.4_8_5, 0.4_5_6, 0.4_0_6] ,std=[0.2_2_9, 0.2_2_4, 0.2_2_5] )
return image
@torch.no_grad()
def __lowerCAmelCase ( _A ,_A ,_A ):
"""simple docstring"""
logger.info("""Converting model...""" )
# load original state dict
_lowercase = torch.hub.load_state_dict_from_url(_lowercase ,map_location="""cpu""" )
# rename keys
for src, dest in rename_keys:
rename_key(_lowercase ,_lowercase ,_lowercase )
_lowercase = rename_backbone_keys(_lowercase )
# query, key and value matrices need special treatment
read_in_q_k_v(_lowercase )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
_lowercase = """model."""
for key in state_dict.copy().keys():
if not key.startswith("""class_labels_classifier""" ) and not key.startswith("""bbox_predictor""" ):
_lowercase = state_dict.pop(_lowercase )
_lowercase = val
# create HuggingFace model and load state dict
_lowercase = TableTransformerConfig(
backbone="""resnet18""" ,mask_loss_coefficient=1 ,dice_loss_coefficient=1 ,ce_loss_coefficient=1 ,bbox_loss_coefficient=5 ,giou_loss_coefficient=2 ,eos_coefficient=0.4 ,class_cost=1 ,bbox_cost=5 ,giou_cost=2 ,)
if "detection" in checkpoint_url:
_lowercase = 15
_lowercase = 2
_lowercase = {0: """table""", 1: """table rotated"""}
_lowercase = idalabel
_lowercase = {v: k for k, v in idalabel.items()}
else:
_lowercase = 125
_lowercase = 6
_lowercase = {
0: """table""",
1: """table column""",
2: """table row""",
3: """table column header""",
4: """table projected row header""",
5: """table spanning cell""",
}
_lowercase = idalabel
_lowercase = {v: k for k, v in idalabel.items()}
_lowercase = DetrImageProcessor(
format="""coco_detection""" ,max_size=800 if """detection""" in checkpoint_url else 1_000 )
_lowercase = TableTransformerForObjectDetection(_lowercase )
model.load_state_dict(_lowercase )
model.eval()
# verify our conversion
_lowercase = """example_pdf.png""" if """detection""" in checkpoint_url else """example_table.png"""
_lowercase = hf_hub_download(repo_id="""nielsr/example-pdf""" ,repo_type="""dataset""" ,filename=_lowercase )
_lowercase = Image.open(_lowercase ).convert("""RGB""" )
_lowercase = normalize(resize(_lowercase ,_lowercase ) ).unsqueeze(0 )
_lowercase = model(_lowercase )
if "detection" in checkpoint_url:
_lowercase = (1, 15, 3)
_lowercase = torch.tensor(
[[-6.7_8_9_7, -1_6.9_9_8_5, 6.7_9_3_7], [-8.0_1_8_6, -2_2.2_1_9_2, 6.9_6_7_7], [-7.3_1_1_7, -2_1.0_7_0_8, 7.4_0_5_5]] )
_lowercase = torch.tensor([[0.4_8_6_7, 0.1_7_6_7, 0.6_7_3_2], [0.6_7_1_8, 0.4_4_7_9, 0.3_8_3_0], [0.4_7_1_6, 0.1_7_6_0, 0.6_3_6_4]] )
else:
_lowercase = (1, 125, 7)
_lowercase = torch.tensor(
[[-1_8.1_4_3_0, -8.3_2_1_4, 4.8_2_7_4], [-1_8.4_6_8_5, -7.1_3_6_1, -4.2_6_6_7], [-2_6.3_6_9_3, -9.3_4_2_9, -4.9_9_6_2]] )
_lowercase = torch.tensor([[0.4_9_8_3, 0.5_5_9_5, 0.9_4_4_0], [0.4_9_1_6, 0.6_3_1_5, 0.5_9_5_4], [0.6_1_0_8, 0.8_6_3_7, 0.1_1_3_5]] )
assert outputs.logits.shape == expected_shape
assert torch.allclose(outputs.logits[0, :3, :3] ,_lowercase ,atol=1E-4 )
assert torch.allclose(outputs.pred_boxes[0, :3, :3] ,_lowercase ,atol=1E-4 )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
# Save model and image processor
logger.info(f'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' )
Path(_lowercase ).mkdir(exist_ok=_lowercase )
model.save_pretrained(_lowercase )
image_processor.save_pretrained(_lowercase )
if push_to_hub:
# Push model to HF hub
logger.info("""Pushing model to the hub...""" )
_lowercase = (
"""microsoft/table-transformer-detection"""
if """detection""" in checkpoint_url
else """microsoft/table-transformer-structure-recognition"""
)
model.push_to_hub(_lowercase )
image_processor.push_to_hub(_lowercase )
if __name__ == "__main__":
A_: Any = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_url',
default='https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth',
type=str,
choices=[
'https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth',
'https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth',
],
help='URL of the Table Transformer checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
A_: Union[str, Any] = parser.parse_args()
convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 398 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class snake_case_ ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , unittest.TestCase ):
"""simple docstring"""
A_ = StableDiffusionInpaintPipeline
A_ = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
A_ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
A_ = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
A_ = frozenset([] )
def UpperCAmelCase__ ( self) -> List[Any]:
torch.manual_seed(0)
UpperCamelCase = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=9 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=3_2 , attention_head_dim=(2, 4) , use_linear_projection=lowerCamelCase_ , )
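# The 9 UNet input channels above come from concatenating the 4 noisy latents,
# the 1-channel downsampled mask, and the 4 masked-image latents.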
UpperCamelCase = PNDMScheduler(skip_prk_steps=lowerCamelCase_)
torch.manual_seed(0)
UpperCamelCase = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , sample_size=1_2_8 , )
torch.manual_seed(0)
UpperCamelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , hidden_act='''gelu''' , projection_dim=5_1_2 , )
UpperCamelCase = CLIPTextModel(lowerCamelCase_)
UpperCamelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''')
UpperCamelCase = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_=0) -> Dict:
# TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched
UpperCamelCase = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(lowerCamelCase_)).to(lowerCamelCase_)
UpperCamelCase = image.cpu().permute(0 , 2 , 3 , 1)[0]
UpperCamelCase = Image.fromarray(np.uinta(lowerCamelCase_)).convert('''RGB''').resize((6_4, 6_4))
UpperCamelCase = Image.fromarray(np.uinta(image + 4)).convert('''RGB''').resize((6_4, 6_4))
if str(lowerCamelCase_).startswith('''mps'''):
UpperCamelCase = torch.manual_seed(lowerCamelCase_)
else:
UpperCamelCase = torch.Generator(device=lowerCamelCase_).manual_seed(lowerCamelCase_)
UpperCamelCase = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': init_image,
'''mask_image''': mask_image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
}
return inputs
def UpperCAmelCase__ ( self) -> Optional[Any]:
UpperCamelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator
UpperCamelCase = self.get_dummy_components()
UpperCamelCase = StableDiffusionInpaintPipeline(**lowerCamelCase_)
UpperCamelCase = sd_pipe.to(lowerCamelCase_)
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_)
UpperCamelCase = self.get_dummy_inputs(lowerCamelCase_)
UpperCamelCase = sd_pipe(**lowerCamelCase_).images
UpperCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
UpperCamelCase = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
def UpperCAmelCase__ ( self) -> Union[str, Any]:
super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class snake_case_ ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase__ ( self) -> int:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase__ ( self) -> List[Any]:
UpperCamelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-inpaint/init_image.png''')
UpperCamelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''')
UpperCamelCase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'''
'''/yellow_cat_sitting_on_a_park_bench.npy''')
UpperCamelCase = '''stabilityai/stable-diffusion-2-inpainting'''
UpperCamelCase = StableDiffusionInpaintPipeline.from_pretrained(lowerCamelCase_ , safety_checker=lowerCamelCase_)
pipe.to(lowerCamelCase_)
pipe.set_progress_bar_config(disable=lowerCamelCase_)
pipe.enable_attention_slicing()
UpperCamelCase = '''Face of a yellow cat, high resolution, sitting on a park bench'''
UpperCamelCase = torch.manual_seed(0)
UpperCamelCase = pipe(
prompt=lowerCamelCase_ , image=lowerCamelCase_ , mask_image=lowerCamelCase_ , generator=lowerCamelCase_ , output_type='''np''' , )
UpperCamelCase = output.images[0]
assert image.shape == (5_1_2, 5_1_2, 3)
assert np.abs(expected_image - image).max() < 9e-3
def UpperCAmelCase__ ( self) -> Optional[Any]:
UpperCamelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-inpaint/init_image.png''')
UpperCamelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''')
UpperCamelCase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'''
'''/yellow_cat_sitting_on_a_park_bench_fp16.npy''')
UpperCamelCase = '''stabilityai/stable-diffusion-2-inpainting'''
UpperCamelCase = StableDiffusionInpaintPipeline.from_pretrained(
lowerCamelCase_ , torch_dtype=torch.floataa , safety_checker=lowerCamelCase_ , )
pipe.to(lowerCamelCase_)
pipe.set_progress_bar_config(disable=lowerCamelCase_)
pipe.enable_attention_slicing()
UpperCamelCase = '''Face of a yellow cat, high resolution, sitting on a park bench'''
UpperCamelCase = torch.manual_seed(0)
UpperCamelCase = pipe(
prompt=lowerCamelCase_ , image=lowerCamelCase_ , mask_image=lowerCamelCase_ , generator=lowerCamelCase_ , output_type='''np''' , )
UpperCamelCase = output.images[0]
assert image.shape == (5_1_2, 5_1_2, 3)
assert np.abs(expected_image - image).max() < 5e-1
def UpperCAmelCase__ ( self) -> List[str]:
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
UpperCamelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-inpaint/init_image.png''')
UpperCamelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''')
UpperCamelCase = '''stabilityai/stable-diffusion-2-inpainting'''
UpperCamelCase = PNDMScheduler.from_pretrained(lowerCamelCase_ , subfolder='''scheduler''')
UpperCamelCase = StableDiffusionInpaintPipeline.from_pretrained(
lowerCamelCase_ , safety_checker=lowerCamelCase_ , scheduler=lowerCamelCase_ , torch_dtype=torch.floataa , )
pipe.to(lowerCamelCase_)
pipe.set_progress_bar_config(disable=lowerCamelCase_)
pipe.enable_attention_slicing(1)
pipe.enable_sequential_cpu_offload()
UpperCamelCase = '''Face of a yellow cat, high resolution, sitting on a park bench'''
UpperCamelCase = torch.manual_seed(0)
UpperCamelCase = pipe(
prompt=lowerCamelCase_ , image=lowerCamelCase_ , mask_image=lowerCamelCase_ , generator=lowerCamelCase_ , num_inference_steps=2 , output_type='''np''' , )
UpperCamelCase = torch.cuda.max_memory_allocated()
# make sure that less than 2.65 GB is allocated
assert mem_bytes < 2.65 * 1_0**9 | 34 | 0 |
'''simple docstring'''
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def __lowercase () -> Optional[int]:
"""simple docstring"""
__lowerCamelCase : Any = ArgumentParser(
description=(
"""PyTorch TPU distributed training launch """
"""helper utility that will spawn up """
"""multiple distributed processes"""
) )
# Optional arguments for the launch helper
parser.add_argument("""--num_cores""", type=_lowercase, default=1, help="""Number of TPU cores to use (1 or 8).""" )
# positional
parser.add_argument(
"""training_script""", type=_lowercase, help=(
"""The full path to the single TPU training """
"""program/script to be launched in parallel, """
"""followed by all the arguments for the """
"""training script"""
), )
# rest from the training program
parser.add_argument("""training_script_args""", nargs=_lowercase )
return parser.parse_args()
def __lowercase () -> int:
"""simple docstring"""
__lowerCamelCase : str = parse_args()
# Import training_script as a module.
__lowerCamelCase : Union[str, Any] = Path(args.training_script )
sys.path.append(str(script_fpath.parent.resolve() ) )
__lowerCamelCase : int = script_fpath.stem
__lowerCamelCase : List[Any] = importlib.import_module(_lowercase )
# Patch sys.argv
__lowerCamelCase : Tuple = [args.training_script] + args.training_script_args + ["""--tpu_num_cores""", str(args.num_cores )]
xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores )
if __name__ == "__main__":
main()
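# Typical invocation of this launcher (hypothetical script name):
#
#   python xla_spawn.py --num_cores 8 my_training_script.py --foo bar
#
# which spawns 8 TPU processes, each seeing `--tpu_num_cores 8` appended to
# its argv before `_mp_fn` runs.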
| 150 |
"""simple docstring"""
import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch
import pyarrow as pa
import pytest
import requests
from packaging import version
from datasets import config
if config.PY_VERSION < version.parse('3.8'):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
def __snake_case ( _lowercase ,_lowercase=False ):
"""simple docstring"""
try:
UpperCamelCase = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
UpperCamelCase = default
else:
# KEY is set, convert it to True or False.
try:
UpperCamelCase = strtobool(_lowercase )
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(f'If set, {key} must be yes or no.' )
return _value
SCREAMING_SNAKE_CASE_ = parse_flag_from_env('RUN_SLOW', default=False)
SCREAMING_SNAKE_CASE_ = parse_flag_from_env('RUN_REMOTE', default=False)
SCREAMING_SNAKE_CASE_ = parse_flag_from_env('RUN_LOCAL', default=True)
SCREAMING_SNAKE_CASE_ = parse_flag_from_env('RUN_PACKAGED', default=True)
# Compression
SCREAMING_SNAKE_CASE_ = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason='test requires lz4')
SCREAMING_SNAKE_CASE_ = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason='test requires py7zr')
SCREAMING_SNAKE_CASE_ = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason='test requires zstandard')
# Audio
SCREAMING_SNAKE_CASE_ = pytest.mark.skipif(
# On Windows and OS X, soundfile installs sndfile
find_spec('soundfile') is None or version.parse(importlib_metadata.version('soundfile')) < version.parse('0.12.0'),
reason='test requires sndfile>=0.12.1: \'pip install \"soundfile>=0.12.1\"\'; ',
)
# Beam
SCREAMING_SNAKE_CASE_ = pytest.mark.skipif(
not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse('0.3.2'),
reason='test requires apache-beam and a compatible dill version',
)
# Dill-cloudpickle compatibility
SCREAMING_SNAKE_CASE_ = pytest.mark.skipif(
config.DILL_VERSION <= version.parse('0.3.2'),
reason='test requires dill>0.3.2 for cloudpickle compatibility',
)
# Windows
SCREAMING_SNAKE_CASE_ = pytest.mark.skipif(
sys.platform == 'win32',
reason='test should not be run on Windows',
)
def __snake_case ( _lowercase ):
"""simple docstring"""
try:
import faiss # noqa
except ImportError:
UpperCamelCase = unittest.skip('''test requires faiss''' )(_lowercase )
return test_case
def __snake_case ( _lowercase ):
"""simple docstring"""
try:
import regex # noqa
except ImportError:
UpperCamelCase = unittest.skip('''test requires regex''' )(_lowercase )
return test_case
def __snake_case ( _lowercase ):
"""simple docstring"""
try:
import elasticsearch # noqa
except ImportError:
UpperCamelCase = unittest.skip('''test requires elasticsearch''' )(_lowercase )
return test_case
def __snake_case ( _lowercase ):
"""simple docstring"""
try:
import sqlalchemy # noqa
except ImportError:
UpperCamelCase = unittest.skip('''test requires sqlalchemy''' )(_lowercase )
return test_case
def __snake_case ( _lowercase ):
"""simple docstring"""
if not config.TORCH_AVAILABLE:
UpperCamelCase = unittest.skip('''test requires PyTorch''' )(_lowercase )
return test_case
def __snake_case ( _lowercase ):
"""simple docstring"""
if not config.TF_AVAILABLE:
UpperCamelCase = unittest.skip('''test requires TensorFlow''' )(_lowercase )
return test_case
def __snake_case ( _lowercase ):
"""simple docstring"""
if not config.JAX_AVAILABLE:
UpperCamelCase = unittest.skip('''test requires JAX''' )(_lowercase )
return test_case
def __snake_case ( _lowercase ):
"""simple docstring"""
if not config.PIL_AVAILABLE:
UpperCamelCase = unittest.skip('''test requires Pillow''' )(_lowercase )
return test_case
def __snake_case ( _lowercase ):
"""simple docstring"""
try:
import transformers # noqa F401
except ImportError:
return unittest.skip('''test requires transformers''' )(_lowercase )
else:
return test_case
def __snake_case ( _lowercase ):
"""simple docstring"""
try:
import tiktoken # noqa F401
except ImportError:
return unittest.skip('''test requires tiktoken''' )(_lowercase )
else:
return test_case
def __snake_case ( _lowercase ):
"""simple docstring"""
try:
import spacy # noqa F401
except ImportError:
return unittest.skip('''test requires spacy''' )(_lowercase )
else:
return test_case
def __snake_case ( _lowercase ):
"""simple docstring"""
def _require_spacy_model(_lowercase ):
try:
import spacy # noqa F401
spacy.load(_lowercase )
except ImportError:
return unittest.skip('''test requires spacy''' )(_lowercase )
except OSError:
return unittest.skip('''test requires spacy model \'{}\''''.format(_lowercase ) )(_lowercase )
else:
return test_case
return _require_spacy_model
def __snake_case ( _lowercase ):
"""simple docstring"""
try:
import pyspark # noqa F401
except ImportError:
return unittest.skip('''test requires pyspark''' )(_lowercase )
else:
return test_case
def __snake_case ( _lowercase ):
"""simple docstring"""
try:
import joblibspark # noqa F401
except ImportError:
return unittest.skip('''test requires joblibspark''' )(_lowercase )
else:
return test_case
def __snake_case ( _lowercase ):
"""simple docstring"""
if not _run_slow_tests or _run_slow_tests == 0:
UpperCamelCase = unittest.skip('''test is slow''' )(_lowercase )
return test_case
def __snake_case ( _lowercase ):
"""simple docstring"""
if not _run_local_tests or _run_local_tests == 0:
UpperCamelCase = unittest.skip('''test is local''' )(_lowercase )
return test_case
def __snake_case ( _lowercase ):
"""simple docstring"""
if not _run_packaged_tests or _run_packaged_tests == 0:
UpperCamelCase = unittest.skip('''test is packaged''' )(_lowercase )
return test_case
def __snake_case ( _lowercase ):
"""simple docstring"""
if not _run_remote_tests or _run_remote_tests == 0:
UpperCamelCase = unittest.skip('''test requires remote''' )(_lowercase )
return test_case
def __snake_case ( *_lowercase ):
"""simple docstring"""
def decorate(cls ):
for name, fn in cls.__dict__.items():
if callable(_lowercase ) and name.startswith('''test''' ):
for decorator in decorators:
UpperCamelCase = decorator(_lowercase )
setattr(cls ,_lowercase ,_lowercase )
return cls
return decorate
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
pass
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
A_ = 0
A_ = 1
A_ = 2
@contextmanager
def __snake_case ( _lowercase=OfflineSimulationMode.CONNECTION_FAILS ,_lowercase=1e-16 ):
"""simple docstring"""
UpperCamelCase = requests.Session().request
def timeout_request(_lowercase ,_lowercase ,_lowercase ,**_lowercase ):
# Change the url to an invalid url so that the connection hangs
UpperCamelCase = '''https://10.255.255.1'''
if kwargs.get('''timeout''' ) is None:
raise RequestWouldHangIndefinitelyError(
f'Tried a call to {url} in offline mode with no timeout set. Please set a timeout.' )
UpperCamelCase = timeout
try:
return online_request(_lowercase ,_lowercase ,**_lowercase )
except Exception as e:
# The following changes in the error are just here to make the offline timeout error prettier
UpperCamelCase = url
UpperCamelCase = e.args[0]
UpperCamelCase = (max_retry_error.args[0].replace('''10.255.255.1''' ,f'OfflineMock[{url}]' ),)
UpperCamelCase = (max_retry_error,)
raise
def raise_connection_error(_lowercase ,_lowercase ,**_lowercase ):
raise requests.ConnectionError('''Offline mode is enabled.''' ,request=_lowercase )
if mode is OfflineSimulationMode.CONNECTION_FAILS:
with patch('''requests.Session.send''' ,_lowercase ):
yield
elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
# inspired from https://stackoverflow.com/a/904609
with patch('''requests.Session.request''' ,_lowercase ):
yield
elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
with patch('''datasets.config.HF_DATASETS_OFFLINE''' ,_lowercase ):
yield
else:
raise ValueError('''Please use a value from the OfflineSimulationMode enum.''' )
@contextmanager
def __snake_case ( *_lowercase ,**_lowercase ):
"""simple docstring"""
UpperCamelCase = str(Path().resolve() )
with tempfile.TemporaryDirectory(*_lowercase ,**_lowercase ) as tmp_dir:
try:
os.chdir(_lowercase )
yield
finally:
os.chdir(_lowercase )
@contextmanager
def __snake_case ( ):
"""simple docstring"""
import gc
gc.collect()
UpperCamelCase = pa.total_allocated_bytes()
yield
assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."
@contextmanager
def __snake_case ( ):
"""simple docstring"""
import gc
gc.collect()
UpperCamelCase = pa.total_allocated_bytes()
yield
assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."
def __snake_case ( _lowercase ,_lowercase ):
"""simple docstring"""
return deepcopy(_lowercase ).integers(0 ,100 ,10 ).tolist() == deepcopy(_lowercase ).integers(0 ,100 ,10 ).tolist()
def __snake_case ( _lowercase ):
"""simple docstring"""
import decorator
from requests.exceptions import HTTPError
def _wrapper(_lowercase ,*_lowercase ,**_lowercase ):
try:
return func(*_lowercase ,**_lowercase )
except HTTPError as err:
if str(_lowercase ).startswith('''500''' ) or str(_lowercase ).startswith('''502''' ):
pytest.xfail(str(_lowercase ) )
raise err
return decorator.decorator(_wrapper ,_lowercase )
class snake_case_ :
"""simple docstring"""
def __init__( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> Dict:
UpperCamelCase = returncode
UpperCamelCase = stdout
UpperCamelCase = stderr
async def __snake_case ( _lowercase ,_lowercase ):
"""simple docstring"""
while True:
UpperCamelCase = await stream.readline()
if line:
callback(_lowercase )
else:
break
async def __snake_case ( _lowercase ,_lowercase=None ,_lowercase=None ,_lowercase=None ,_lowercase=False ,_lowercase=False ):
"""simple docstring"""
if echo:
print('''\nRunning: ''' ,''' '''.join(_lowercase ) )
UpperCamelCase = await asyncio.create_subprocess_exec(
cmd[0] ,*cmd[1:] ,stdin=_lowercase ,stdout=asyncio.subprocess.PIPE ,stderr=asyncio.subprocess.PIPE ,env=_lowercase ,)
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
UpperCamelCase = []
UpperCamelCase = []
def tee(_lowercase ,_lowercase ,_lowercase ,_lowercase="" ):
UpperCamelCase = line.decode('''utf-8''' ).rstrip()
sink.append(_lowercase )
if not quiet:
print(_lowercase ,_lowercase ,file=_lowercase )
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
_read_stream(p.stdout ,lambda _lowercase : tee(_lowercase ,_lowercase ,sys.stdout ,label='''stdout:''' ) ),
_read_stream(p.stderr ,lambda _lowercase : tee(_lowercase ,_lowercase ,sys.stderr ,label='''stderr:''' ) ),
] ,timeout=_lowercase ,)
return _RunOutput(await p.wait() ,_lowercase ,_lowercase )
def __snake_case ( _lowercase ,_lowercase=None ,_lowercase=None ,_lowercase=180 ,_lowercase=False ,_lowercase=True ):
"""simple docstring"""
UpperCamelCase = asyncio.get_event_loop()
UpperCamelCase = loop.run_until_complete(
_stream_subprocess(_lowercase ,env=_lowercase ,stdin=_lowercase ,timeout=_lowercase ,quiet=_lowercase ,echo=_lowercase ) )
UpperCamelCase = ''' '''.join(_lowercase )
if result.returncode > 0:
UpperCamelCase = '''\n'''.join(result.stderr )
raise RuntimeError(
f'\'{cmd_str}\' failed with returncode {result.returncode}\n\n'
f'The combined stderr from workers follows:\n{stderr}' )
# check that the subprocess actually did run and produced some output, should the test rely on
# the remote side to do the testing
if not result.stdout and not result.stderr:
raise RuntimeError(f'\'{cmd_str}\' produced no output.' )
return result
def __snake_case ( ):
"""simple docstring"""
UpperCamelCase = os.environ.get('''PYTEST_XDIST_WORKER''' ,'''gw0''' )
UpperCamelCase = re.sub(r'''^gw''' ,'''''' ,_lowercase ,0 ,re.M )
return int(_lowercase )
def __snake_case ( ):
"""simple docstring"""
UpperCamelCase = 2_9500
UpperCamelCase = pytest_xdist_worker_id()
return port + uniq_delta | 34 | 0 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_pegasus import PegasusTokenizer
else:
A = None
A = logging.get_logger(__name__)
A = '▁'
A = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
A = {
'vocab_file': {'google/pegasus-xsum': 'https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model'},
'tokenizer_file': {
'google/pegasus-xsum': 'https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json'
},
}
A = {
'google/pegasus-xsum': 512,
}
class __snake_case ( lowerCamelCase_):
_lowerCAmelCase = VOCAB_FILES_NAMES
_lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP
_lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowerCAmelCase = PegasusTokenizer
_lowerCAmelCase = ['''input_ids''', '''attention_mask''']
def __init__( self, A=None, A=None, A="<pad>", A="</s>", A="<unk>", A="<mask_2>", A="<mask_1>", A=None, A=103, **A, ):
"""simple docstring"""
lowerCamelCase : List[str] = offset
if additional_special_tokens is not None:
if not isinstance(lowerCamelCase_, lowerCamelCase_ ):
raise TypeError(
F'''additional_special_tokens should be of type {type(lowerCamelCase_ )}, but is'''
F''' {type(lowerCamelCase_ )}''' )
lowerCamelCase : Optional[Any] = (
([mask_token_sent] + additional_special_tokens)
if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
else additional_special_tokens
)
# fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
additional_special_tokens_extended += [
F'''<unk_{i}>''' for i in range(len(lowerCamelCase_ ), self.offset - 1 )
]
if len(set(lowerCamelCase_ ) ) != len(lowerCamelCase_ ):
raise ValueError(
'Please make sure that the provided additional_special_tokens do not contain an incorrectly'
F''' shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.''' )
lowerCamelCase : Dict = additional_special_tokens_extended
else:
lowerCamelCase : str = [mask_token_sent] if mask_token_sent is not None else []
additional_special_tokens += [F'''<unk_{i}>''' for i in range(2, self.offset )]
super().__init__(
lowerCamelCase_, tokenizer_file=lowerCamelCase_, pad_token=lowerCamelCase_, eos_token=lowerCamelCase_, unk_token=lowerCamelCase_, mask_token=lowerCamelCase_, mask_token_sent=lowerCamelCase_, offset=lowerCamelCase_, additional_special_tokens=lowerCamelCase_, **lowerCamelCase_, )
lowerCamelCase : int = vocab_file
lowerCamelCase : int = False if not self.vocab_file else True
def UpperCAmelCase_ ( self, A ):
"""simple docstring"""
lowerCamelCase : Any = set(self.all_special_ids ) # call it once instead of inside list comp
all_special_ids.remove(self.unk_token_id ) # <unk> is only sometimes special
if all_special_ids != set(range(len(self.additional_special_tokens ) + 3 ) ):
raise ValueError(
'There should be 3 special tokens: mask_token, pad_token, and eos_token +'
F''' {len(self.additional_special_tokens )} additional_special_tokens, but got {all_special_ids}''' )
return [1 if x in all_special_ids else 0 for x in seq]
def UpperCAmelCase_ ( self, A, A = None, A = False ):
"""simple docstring"""
if already_has_special_tokens:
return self._special_token_mask(lowerCamelCase_ )
elif token_ids_a is None:
return self._special_token_mask(lowerCamelCase_ ) + [1]
else:
return self._special_token_mask(token_ids_a + token_ids_a ) + [1]
def UpperCAmelCase_ ( self, A, A=None ):
"""simple docstring"""
if token_ids_a is None:
return token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return token_ids_a + token_ids_a + [self.eos_token_id]
def UpperCAmelCase_ ( self, A, A = None ):
"""simple docstring"""
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.' )
if not os.path.isdir(lowerCamelCase_ ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
lowerCamelCase : Tuple = os.path.join(
lowerCamelCase_, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCamelCase_ ):
copyfile(self.vocab_file, lowerCamelCase_ )
return (out_vocab_file,)
| 320 |
"""simple docstring"""
import operator
def __snake_case ( _lowercase ,_lowercase = False ,_lowercase = None ):
"""simple docstring"""
UpperCamelCase = operator.lt if reverse else operator.gt
UpperCamelCase = solution or []
if not arr:
return solution
UpperCamelCase = [arr.pop(0 )]
for i, item in enumerate(_lowercase ):
if _operator(_lowercase ,sublist[-1] ):
sublist.append(_lowercase )
arr.pop(_lowercase )
# merging sublist into solution list
if not solution:
solution.extend(_lowercase )
else:
while sublist:
UpperCamelCase = sublist.pop(0 )
for i, xx in enumerate(_lowercase ):
if not _operator(_lowercase ,_lowercase ):
solution.insert(_lowercase ,_lowercase )
break
else:
solution.append(_lowercase )
strand_sort(_lowercase ,_lowercase ,_lowercase )
return solution
if __name__ == "__main__":
assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1] | 34 | 0 |
'''simple docstring'''
import argparse
import collections
import os
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_table.py
UpperCAmelCase_ : int = 'src/transformers'
UpperCAmelCase_ : List[str] = 'docs/source/en'
UpperCAmelCase_ : Dict = '.'
def A_ ( _lowerCAmelCase : Dict , _lowerCAmelCase : List[str] , _lowerCAmelCase : Any ):
"""simple docstring"""
with open(_lowercase , "r" , encoding="utf-8" , newline="\n" ) as f:
_lowerCamelCase : Optional[int] = f.readlines()
# Find the start prompt.
_lowerCamelCase : int = 0
while not lines[start_index].startswith(_lowercase ):
start_index += 1
start_index += 1
_lowerCamelCase : List[str] = start_index
while not lines[end_index].startswith(_lowercase ):
end_index += 1
end_index -= 1
while len(lines[start_index] ) <= 1:
start_index += 1
while len(lines[end_index] ) <= 1:
end_index -= 1
end_index += 1
return "".join(lines[start_index:end_index] ), start_index, end_index, lines
# Add here suffixes that are used to identify models, separated by |
UpperCAmelCase_ : int = 'Model|Encoder|Decoder|ForConditionalGeneration'
# Regexes that match TF/Flax/PT model names.
UpperCAmelCase_ : Tuple = re.compile(R'TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)')
UpperCAmelCase_ : Optional[int] = re.compile(R'Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)')
# Will match any TF or Flax model too so need to be in an else branch afterthe two previous regexes.
UpperCAmelCase_ : Tuple = re.compile(R'(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)')
# This is to make sure the transformers module imported is the one in the repo.
UpperCAmelCase_ : Union[str, Any] = direct_transformers_import(TRANSFORMERS_PATH)
def A_ ( _lowerCAmelCase : Tuple ):
"""simple docstring"""
_lowerCamelCase : List[str] = re.finditer(".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)" , _lowercase )
return [m.group(0 ) for m in matches]
def A_ ( _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : str ):
"""simple docstring"""
_lowerCamelCase : int = 2 if text == "✅" or text == "❌" else len(_lowercase )
_lowerCamelCase : List[str] = (width - text_length) // 2
_lowerCamelCase : Any = width - text_length - left_indent
return " " * left_indent + text + " " * right_indent
def A_ ( ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
_lowerCamelCase : Optional[Any] = {
name: config_maping_names[code]
for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
if code in config_maping_names
}
_lowerCamelCase : int = {name: config.replace("Config" , "" ) for name, config in model_name_to_config.items()}
# Dictionaries flagging if each model prefix has a slow/fast tokenizer, backend in PT/TF/Flax.
_lowerCamelCase : Optional[int] = collections.defaultdict(_lowercase )
_lowerCamelCase : Union[str, Any] = collections.defaultdict(_lowercase )
_lowerCamelCase : Union[str, Any] = collections.defaultdict(_lowercase )
_lowerCamelCase : Optional[Any] = collections.defaultdict(_lowercase )
_lowerCamelCase : str = collections.defaultdict(_lowercase )
# Let's lookup through all transformers object (once).
for attr_name in dir(_lowercase ):
_lowerCamelCase : Any = None
if attr_name.endswith("Tokenizer" ):
_lowerCamelCase : Any = slow_tokenizers
_lowerCamelCase : Optional[Any] = attr_name[:-9]
elif attr_name.endswith("TokenizerFast" ):
_lowerCamelCase : List[str] = fast_tokenizers
_lowerCamelCase : Optional[Any] = attr_name[:-13]
elif _re_tf_models.match(_lowercase ) is not None:
_lowerCamelCase : Optional[Any] = tf_models
_lowerCamelCase : List[str] = _re_tf_models.match(_lowercase ).groups()[0]
elif _re_flax_models.match(_lowercase ) is not None:
_lowerCamelCase : Dict = flax_models
_lowerCamelCase : Union[str, Any] = _re_flax_models.match(_lowercase ).groups()[0]
elif _re_pt_models.match(_lowercase ) is not None:
_lowerCamelCase : Optional[Any] = pt_models
_lowerCamelCase : List[Any] = _re_pt_models.match(_lowercase ).groups()[0]
if lookup_dict is not None:
while len(_lowercase ) > 0:
if attr_name in model_name_to_prefix.values():
_lowerCamelCase : int = True
break
# Try again after removing the last word in the name
_lowerCamelCase : int = "".join(camel_case_split(_lowercase )[:-1] )
# Let's build that table!
_lowerCamelCase : Union[str, Any] = list(model_name_to_config.keys() )
model_names.sort(key=str.lower )
_lowerCamelCase : Optional[int] = ["Model", "Tokenizer slow", "Tokenizer fast", "PyTorch support", "TensorFlow support", "Flax Support"]
# We'll need widths to properly display everything in the center (+2 is to leave one extra space on each side).
_lowerCamelCase : int = [len(_lowercase ) + 2 for c in columns]
_lowerCamelCase : Optional[int] = max([len(_lowercase ) for name in model_names] ) + 2
# Build the table per se
_lowerCamelCase : List[str] = "|" + "|".join([_center_text(_lowercase , _lowercase ) for c, w in zip(_lowercase , _lowercase )] ) + "|\n"
# Use ":-----:" format to center-aligned table cell texts
table += "|" + "|".join([":" + "-" * (w - 2) + ":" for w in widths] ) + "|\n"
_lowerCamelCase : int = {True: "✅", False: "❌"}
for name in model_names:
_lowerCamelCase : List[Any] = model_name_to_prefix[name]
_lowerCamelCase : Union[str, Any] = [
name,
check[slow_tokenizers[prefix]],
check[fast_tokenizers[prefix]],
check[pt_models[prefix]],
check[tf_models[prefix]],
check[flax_models[prefix]],
]
table += "|" + "|".join([_center_text(_lowercase , _lowercase ) for l, w in zip(_lowercase , _lowercase )] ) + "|\n"
return table
def A_ ( _lowerCAmelCase : int=False ):
"""simple docstring"""
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : str = _find_text_in_file(
filename=os.path.join(_lowercase , "index.md" ) , start_prompt="<!--This table is updated automatically from the auto modules" , end_prompt="<!-- End table-->" , )
_lowerCamelCase : str = get_model_table_from_auto_modules()
if current_table != new_table:
if overwrite:
with open(os.path.join(_lowercase , "index.md" ) , "w" , encoding="utf-8" , newline="\n" ) as f:
f.writelines(lines[:start_index] + [new_table] + lines[end_index:] )
else:
raise ValueError(
"The model table in the `index.md` has not been updated. Run `make fix-copies` to fix this." )
if __name__ == "__main__":
UpperCAmelCase_ : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
UpperCAmelCase_ : Union[str, Any] = parser.parse_args()
check_model_table(args.fix_and_overwrite) | 44 |
"""simple docstring"""
from scipy.stats import pearsonr
import datasets
SCREAMING_SNAKE_CASE_ = '\nPearson correlation coefficient and p-value for testing non-correlation.\nThe Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.\nThe p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.\n'
SCREAMING_SNAKE_CASE_ = '\nArgs:\n predictions (`list` of `int`): Predicted class labels, as returned by a model.\n references (`list` of `int`): Ground truth labels.\n return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.\n\nReturns:\n pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.\n p-value (`float`): P-value, which roughly indicates the probability of an The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.\n\nExamples:\n\n Example 1-A simple example using only predictions and references.\n >>> pearsonr_metric = datasets.load_metric("pearsonr")\n >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])\n >>> print(round(results[\'pearsonr\'], 2))\n -0.74\n\n Example 2-The same as Example 1, but that also returns the `p-value`.\n >>> pearsonr_metric = datasets.load_metric("pearsonr")\n >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)\n >>> print(sorted(list(results.keys())))\n [\'p-value\', \'pearsonr\']\n >>> print(round(results[\'pearsonr\'], 2))\n -0.74\n >>> print(round(results[\'p-value\'], 2))\n 0.15\n'
SCREAMING_SNAKE_CASE_ = '\n@article{2020SciPy-NMeth,\nauthor = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n Kern, Robert and Larson, Eric and Carey, C J and\n Polat, Ilhan and Feng, Yu and Moore, Eric W. and\n {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n Harris, Charles R. and Archibald, Anne M. and\n Ribeiro, Antonio H. and Pedregosa, Fabian and\n {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\ntitle = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n Computing in Python}},\njournal = {Nature Methods},\nyear = {2020},\nvolume = {17},\npages = {261--272},\nadsurl = {https://rdcu.be/b08Wh},\ndoi = {10.1038/s41592-019-0686-2},\n}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class snake_case_ ( datasets.Metric ):
"""simple docstring"""
def UpperCAmelCase__ ( self) -> Tuple:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''float'''),
'''references''': datasets.Value('''float'''),
}) , reference_urls=['''https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html'''] , )
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_=False) -> Any:
if return_pvalue:
UpperCamelCase = pearsonr(lowerCamelCase_ , lowerCamelCase_)
return {"pearsonr": results[0], "p-value": results[1]}
else:
return {"pearsonr": float(pearsonr(lowerCamelCase_ , lowerCamelCase_)[0])} | 34 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
a_ : Optional[Any] = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : Any = ['NllbTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : Any = ['NllbTokenizerFast']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb import NllbTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb_fast import NllbTokenizerFast
else:
import sys
a_ : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 623 |
"""simple docstring"""
import unittest
from dataclasses import dataclass
import pytest
from accelerate.commands.config.config_args import SageMakerConfig
from accelerate.utils import ComputeEnvironment
from accelerate.utils.launch import _convert_nargs_to_dict
@dataclass
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
A_ = ComputeEnvironment.AMAZON_SAGEMAKER
A_ = True
A_ = '''ml.p3.2xlarge'''
A_ = '''accelerate_sagemaker_execution_role'''
A_ = '''hf-sm'''
A_ = '''us-east-1'''
A_ = 1
A_ = '''accelerate-sagemaker-1'''
A_ = '''1.6'''
A_ = '''4.4'''
A_ = '''train.py'''
A_ = [
'''--model_name_or_path''',
'''bert''',
'''--do_train''',
'''False''',
'''--epochs''',
'''3''',
'''--learning_rate''',
'''5e-5''',
'''--max_steps''',
'''50.5''',
]
A_ = [
'''--model_name_or_path''',
'''bert''',
'''--do_train''',
'''--do_test''',
'''False''',
'''--do_predict''',
'''--epochs''',
'''3''',
'''--learning_rate''',
'''5e-5''',
'''--max_steps''',
'''50.5''',
]
class snake_case_ ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase__ ( self) -> List[Any]:
# If no defaults are changed, `to_kwargs` returns an empty dict.
UpperCamelCase = _convert_nargs_to_dict(MockLaunchConfig.success_training_script_args)
assert isinstance(converted_args['''model_name_or_path'''] , lowerCamelCase_)
assert isinstance(converted_args['''do_train'''] , lowerCamelCase_)
assert isinstance(converted_args['''epochs'''] , lowerCamelCase_)
assert isinstance(converted_args['''learning_rate'''] , lowerCamelCase_)
assert isinstance(converted_args['''max_steps'''] , lowerCamelCase_)
with pytest.raises(lowerCamelCase_):
_convert_nargs_to_dict(MockLaunchConfig.fail_training_script_args) | 34 | 0 |
'''simple docstring'''
from __future__ import annotations
lowerCamelCase :Tuple = [
[-1, 0], # left
[0, -1], # down
[1, 0], # right
[0, 1], # up
]
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , ):
'''simple docstring'''
A_ : Any = [
[0 for col in range(len(grid[0] ) )] for row in range(len(_lowercase ) )
] # the reference grid
A_ : Union[str, Any] = 1
A_ : Optional[int] = [
[0 for col in range(len(grid[0] ) )] for row in range(len(_lowercase ) )
] # the action grid
A_ : Optional[int] = init[0]
A_ : Optional[Any] = init[1]
A_ : List[str] = 0
A_ : Optional[Any] = g + heuristic[x][y] # cost from starting cell to destination cell
A_ : List[Any] = [[f, g, x, y]]
A_ : Tuple = False # flag that is set when search is complete
A_ : str = False # flag set if we can't find expand
while not found and not resign:
if len(_lowercase ) == 0:
raise ValueError("""Algorithm is unable to find solution""" )
else: # to choose the least costliest action so as to move closer to the goal
cell.sort()
cell.reverse()
A_ : List[Any] = cell.pop()
A_ : Optional[int] = next_cell[2]
A_ : Tuple = next_cell[3]
A_ : Union[str, Any] = next_cell[1]
if x == goal[0] and y == goal[1]:
A_ : int = True
else:
for i in range(len(_lowercase ) ): # to try out different valid actions
A_ : int = x + DIRECTIONS[i][0]
A_ : List[str] = y + DIRECTIONS[i][1]
if xa >= 0 and xa < len(_lowercase ) and ya >= 0 and ya < len(grid[0] ):
if closed[xa][ya] == 0 and grid[xa][ya] == 0:
A_ : int = g + cost
A_ : List[str] = ga + heuristic[xa][ya]
cell.append([fa, ga, xa, ya] )
A_ : str = 1
A_ : Any = i
A_ : Dict = []
A_ : Union[str, Any] = goal[0]
A_ : List[str] = goal[1]
invpath.append([x, y] ) # we get the reverse path from here
while x != init[0] or y != init[1]:
A_ : Union[str, Any] = x - DIRECTIONS[action[x][y]][0]
A_ : List[str] = y - DIRECTIONS[action[x][y]][1]
A_ : Tuple = xa
A_ : str = ya
invpath.append([x, y] )
A_ : Any = []
for i in range(len(_lowercase ) ):
path.append(invpath[len(_lowercase ) - 1 - i] )
return path, action
if __name__ == "__main__":
lowerCamelCase :str = [
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 1, 0],
[0, 0, 0, 0, 1, 0],
]
lowerCamelCase :str = [0, 0]
# all coordinates are given in format [y,x]
lowerCamelCase :Dict = [len(grid) - 1, len(grid[0]) - 1]
lowerCamelCase :Tuple = 1
# the cost map which pushes the path closer to the goal
lowerCamelCase :Optional[Any] = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
for i in range(len(grid)):
for j in range(len(grid[0])):
lowerCamelCase :int = abs(i - goal[0]) + abs(j - goal[1])
if grid[i][j] == 1:
# added extra penalty in the heuristic map
lowerCamelCase :Tuple = 9_9
lowerCamelCase , lowerCamelCase :List[Any] = search(grid, init, goal, cost, heuristic)
print('''ACTION MAP''')
for i in range(len(action)):
print(action[i])
for i in range(len(path)):
print(path[i]) | 667 |
"""simple docstring"""
from typing import List
import jiwer
import jiwer.transforms as tr
from packaging import version
import datasets
from datasets.config import PY_VERSION
if PY_VERSION < version.parse('3.8'):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
SCREAMING_SNAKE_CASE_ = ''
if version.parse(importlib_metadata.version('jiwer')) < version.parse('2.3.0'):
class snake_case_ ( tr.AbstractTransform ):
"""simple docstring"""
def __init__( self , lowerCamelCase_ = " ") -> List[str]:
UpperCamelCase = sentence_delimiter
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Tuple:
return list(lowerCamelCase_)
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Optional[Any]:
UpperCamelCase = []
for sent_idx, sentence in enumerate(lowerCamelCase_):
chars.extend(self.process_string(lowerCamelCase_))
if self.sentence_delimiter is not None and self.sentence_delimiter != "" and sent_idx < len(lowerCamelCase_) - 1:
chars.append(self.sentence_delimiter)
return chars
SCREAMING_SNAKE_CASE_ = tr.Compose(
[tr.RemoveMultipleSpaces(), tr.Strip(), SentencesToListOfCharacters(SENTENCE_DELIMITER)]
)
else:
SCREAMING_SNAKE_CASE_ = tr.Compose(
[
tr.RemoveMultipleSpaces(),
tr.Strip(),
tr.ReduceToSingleSentence(SENTENCE_DELIMITER),
tr.ReduceToListOfListOfChars(),
]
)
SCREAMING_SNAKE_CASE_ = '\\n@inproceedings{inproceedings,\n author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n year = {2004},\n month = {01},\n pages = {},\n title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n'
SCREAMING_SNAKE_CASE_ = '\\nCharacter error rate (CER) is a common metric of the performance of an automatic speech recognition system.\n\nCER is similar to Word Error Rate (WER), but operates on character instead of word. Please refer to docs of WER for further information.\n\nCharacter error rate can be computed as:\n\nCER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct characters,\nN is the number of characters in the reference (N=S+D+C).\n\nCER\'s output is not always a number between 0 and 1, in particular when there is a high number of insertions. This value is often associated to the percentage of characters that were incorrectly predicted. The lower the value, the better the\nperformance of the ASR system with a CER of 0 being a perfect score.\n'
SCREAMING_SNAKE_CASE_ = '\nComputes CER score of transcribed segments against references.\nArgs:\n references: list of references for each speech input.\n predictions: list of transcribtions to score.\n concatenate_texts: Whether or not to concatenate sentences before evaluation, set to True for more accurate result.\nReturns:\n (float): the character error rate\n\nExamples:\n\n >>> predictions = ["this is the prediction", "there is an other sample"]\n >>> references = ["this is the reference", "there is another one"]\n >>> cer = datasets.load_metric("cer")\n >>> cer_score = cer.compute(predictions=predictions, references=references)\n >>> print(cer_score)\n 0.34146341463414637\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class snake_case_ ( datasets.Metric ):
"""simple docstring"""
def UpperCAmelCase__ ( self) -> Dict:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence'''),
'''references''': datasets.Value('''string''' , id='''sequence'''),
}) , codebase_urls=['''https://github.com/jitsi/jiwer/'''] , reference_urls=[
'''https://en.wikipedia.org/wiki/Word_error_rate''',
'''https://sites.google.com/site/textdigitisation/qualitymeasures/computingerrorrates''',
] , )
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_=False) -> List[Any]:
if concatenate_texts:
return jiwer.compute_measures(
lowerCamelCase_ , lowerCamelCase_ , truth_transform=lowerCamelCase_ , hypothesis_transform=lowerCamelCase_ , )["wer"]
UpperCamelCase = 0
UpperCamelCase = 0
for prediction, reference in zip(lowerCamelCase_ , lowerCamelCase_):
UpperCamelCase = jiwer.compute_measures(
lowerCamelCase_ , lowerCamelCase_ , truth_transform=lowerCamelCase_ , hypothesis_transform=lowerCamelCase_ , )
incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
total += measures["substitutions"] + measures["deletions"] + measures["hits"]
return incorrect / total | 34 | 0 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class _UpperCamelCase ( metaclass=lowerCamelCase_ ):
'''simple docstring'''
lowerCAmelCase__ = ["""note_seq"""]
def __init__( self : List[Any] , *_lowerCAmelCase : Union[str, Any] , **_lowerCAmelCase : Optional[Any]):
'''simple docstring'''
requires_backends(self , ['note_seq'])
@classmethod
def __lowerCamelCase ( cls : Dict , *_lowerCAmelCase : str , **_lowerCAmelCase : Tuple):
'''simple docstring'''
requires_backends(cls , ['note_seq'])
@classmethod
def __lowerCamelCase ( cls : List[str] , *_lowerCAmelCase : Optional[Any] , **_lowerCAmelCase : List[str]):
'''simple docstring'''
requires_backends(cls , ['note_seq'])
| 474 |
"""simple docstring"""
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ = {'vocab_file': 'spiece.model'}
SCREAMING_SNAKE_CASE_ = {
'vocab_file': {
'xlnet-base-cased': 'https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model',
'xlnet-large-cased': 'https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model',
}
}
SCREAMING_SNAKE_CASE_ = {
'xlnet-base-cased': None,
'xlnet-large-cased': None,
}
# Segments (not really needed)
SCREAMING_SNAKE_CASE_ = 0
SCREAMING_SNAKE_CASE_ = 1
SCREAMING_SNAKE_CASE_ = 2
SCREAMING_SNAKE_CASE_ = 3
SCREAMING_SNAKE_CASE_ = 4
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
A_ = VOCAB_FILES_NAMES
A_ = PRETRAINED_VOCAB_FILES_MAP
A_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A_ = '''left'''
def __init__( self , lowerCamelCase_ , lowerCamelCase_=False , lowerCamelCase_=True , lowerCamelCase_=False , lowerCamelCase_="<s>" , lowerCamelCase_="</s>" , lowerCamelCase_="<unk>" , lowerCamelCase_="<sep>" , lowerCamelCase_="<pad>" , lowerCamelCase_="<cls>" , lowerCamelCase_="<mask>" , lowerCamelCase_=["<eop>", "<eod>"] , lowerCamelCase_ = None , **lowerCamelCase_ , ) -> None:
# Mask token behave like a normal word, i.e. include the space before it
UpperCamelCase = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_) if isinstance(lowerCamelCase_ , lowerCamelCase_) else mask_token
UpperCamelCase = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=lowerCamelCase_ , remove_space=lowerCamelCase_ , keep_accents=lowerCamelCase_ , bos_token=lowerCamelCase_ , eos_token=lowerCamelCase_ , unk_token=lowerCamelCase_ , sep_token=lowerCamelCase_ , pad_token=lowerCamelCase_ , cls_token=lowerCamelCase_ , mask_token=lowerCamelCase_ , additional_special_tokens=lowerCamelCase_ , sp_model_kwargs=self.sp_model_kwargs , **lowerCamelCase_ , )
UpperCamelCase = 3
UpperCamelCase = do_lower_case
UpperCamelCase = remove_space
UpperCamelCase = keep_accents
UpperCamelCase = vocab_file
UpperCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(lowerCamelCase_)
@property
def UpperCAmelCase__ ( self) -> List[str]:
return len(self.sp_model)
def UpperCAmelCase__ ( self) -> Tuple:
UpperCamelCase = {self.convert_ids_to_tokens(lowerCamelCase_): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder)
return vocab
def __getstate__( self) -> Any:
UpperCamelCase = self.__dict__.copy()
UpperCamelCase = None
return state
def __setstate__( self , lowerCamelCase_) -> str:
UpperCamelCase = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs'''):
UpperCamelCase = {}
UpperCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(self.vocab_file)
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Union[str, Any]:
if self.remove_space:
UpperCamelCase = ''' '''.join(inputs.strip().split())
else:
UpperCamelCase = inputs
UpperCamelCase = outputs.replace('''``''' , '''"''').replace('''\'\'''' , '''"''')
if not self.keep_accents:
UpperCamelCase = unicodedata.normalize('''NFKD''' , lowerCamelCase_)
UpperCamelCase = ''''''.join([c for c in outputs if not unicodedata.combining(lowerCamelCase_)])
if self.do_lower_case:
UpperCamelCase = outputs.lower()
return outputs
def UpperCAmelCase__ ( self , lowerCamelCase_) -> List[str]:
UpperCamelCase = self.preprocess_text(lowerCamelCase_)
UpperCamelCase = self.sp_model.encode(lowerCamelCase_ , out_type=lowerCamelCase_)
UpperCamelCase = []
for piece in pieces:
if len(lowerCamelCase_) > 1 and piece[-1] == str(''',''') and piece[-2].isdigit():
UpperCamelCase = self.sp_model.EncodeAsPieces(piece[:-1].replace(lowerCamelCase_ , ''''''))
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0]) == 1:
UpperCamelCase = cur_pieces[1:]
else:
UpperCamelCase = cur_pieces[0][1:]
cur_pieces.append(piece[-1])
new_pieces.extend(lowerCamelCase_)
else:
new_pieces.append(lowerCamelCase_)
return new_pieces
def UpperCAmelCase__ ( self , lowerCamelCase_) -> int:
return self.sp_model.PieceToId(lowerCamelCase_)
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Optional[int]:
return self.sp_model.IdToPiece(lowerCamelCase_)
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Dict:
UpperCamelCase = ''''''.join(lowerCamelCase_).replace(lowerCamelCase_ , ''' ''').strip()
return out_string
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ = False , lowerCamelCase_ = None , lowerCamelCase_ = True , **lowerCamelCase_ , ) -> str:
UpperCamelCase = kwargs.pop('''use_source_tokenizer''' , lowerCamelCase_)
UpperCamelCase = self.convert_ids_to_tokens(lowerCamelCase_ , skip_special_tokens=lowerCamelCase_)
# To avoid mixing byte-level and unicode for byte-level BPT
# we need to build string separately for added tokens and byte-level tokens
# cf. https://github.com/huggingface/transformers/issues/1133
UpperCamelCase = []
UpperCamelCase = []
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_ids:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(lowerCamelCase_))
UpperCamelCase = []
sub_texts.append(lowerCamelCase_)
else:
current_sub_text.append(lowerCamelCase_)
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(lowerCamelCase_))
# Mimic the behavior of the Rust tokenizer:
# By default, there are no spaces between special tokens
UpperCamelCase = ''''''.join(lowerCamelCase_)
UpperCamelCase = (
clean_up_tokenization_spaces
if clean_up_tokenization_spaces is not None
else self.clean_up_tokenization_spaces
)
if clean_up_tokenization_spaces:
UpperCamelCase = self.clean_up_tokenization(lowerCamelCase_)
return clean_text
else:
return text
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ = None) -> List[int]:
UpperCamelCase = [self.sep_token_id]
UpperCamelCase = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ = None , lowerCamelCase_ = False) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCamelCase_ , token_ids_a=lowerCamelCase_ , already_has_special_tokens=lowerCamelCase_)
if token_ids_a is not None:
return ([0] * len(lowerCamelCase_)) + [1] + ([0] * len(lowerCamelCase_)) + [1, 1]
return ([0] * len(lowerCamelCase_)) + [1, 1]
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ = None) -> List[int]:
UpperCamelCase = [self.sep_token_id]
UpperCamelCase = [2]
if token_ids_a is None:
return len(token_ids_a + sep) * [0] + cls_segment_id
return len(token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1] + cls_segment_id
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ = None) -> Tuple[str]:
if not os.path.isdir(lowerCamelCase_):
logger.error(F'Vocabulary path ({save_directory}) should be a directory')
return
UpperCamelCase = os.path.join(
lowerCamelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''])
if os.path.abspath(self.vocab_file) != os.path.abspath(lowerCamelCase_) and os.path.isfile(self.vocab_file):
copyfile(self.vocab_file , lowerCamelCase_)
elif not os.path.isfile(self.vocab_file):
with open(lowerCamelCase_ , '''wb''') as fi:
UpperCamelCase = self.sp_model.serialized_model_proto()
fi.write(lowerCamelCase_)
return (out_vocab_file,) | 34 | 0 |
import dataclasses
import re
from dataclasses import dataclass
from functools import total_ordering
from typing import Optional, Union
SCREAMING_SNAKE_CASE_:Union[str, Any] = re.compile(R"""^(?P<major>\d+)""" R"""\.(?P<minor>\d+)""" R"""\.(?P<patch>\d+)$""")
@total_ordering
@dataclass
class SCREAMING_SNAKE_CASE__ :
'''simple docstring'''
__lowerCamelCase : List[Any] = 42
__lowerCamelCase : List[str] = None
__lowerCamelCase : List[str] = None
__lowerCamelCase : Optional[Any] = None
__lowerCamelCase : str = None
def _lowerCAmelCase ( self ):
A , A , A : Dict = _str_to_version_tuple(self.version_str )
def __repr__( self ):
return f'''{self.tuple[0]}.{self.tuple[1]}.{self.tuple[2]}'''
@property
def _lowerCAmelCase ( self ):
return self.major, self.minor, self.patch
def _lowerCAmelCase ( self, lowerCamelCase__ ):
if isinstance(lowerCamelCase_, lowerCamelCase_ ):
return Version(lowerCamelCase_ )
elif isinstance(lowerCamelCase_, lowerCamelCase_ ):
return other
raise TypeError(f'''{other} (type {type(lowerCamelCase_ )}) cannot be compared to version.''' )
def __eq__( self, lowerCamelCase__ ):
try:
A : Optional[Any] = self._validate_operand(lowerCamelCase_ )
except (TypeError, ValueError):
return False
else:
return self.tuple == other.tuple
def __lt__( self, lowerCamelCase__ ):
A : Union[str, Any] = self._validate_operand(lowerCamelCase_ )
return self.tuple < other.tuple
def __hash__( self ):
return hash(_version_tuple_to_str(self.tuple ) )
@classmethod
def _lowerCAmelCase ( cls, lowerCamelCase__ ):
A : Any = {f.name for f in dataclasses.fields(cls )}
return cls(**{k: v for k, v in dic.items() if k in field_names} )
def _lowerCAmelCase ( self ):
return self.version_str
def __UpperCamelCase ( _lowerCAmelCase ) -> List[Any]:
"""simple docstring"""
A : List[str] = _VERSION_REG.match(_lowercase )
if not res:
raise ValueError(f'''Invalid version \'{version_str}\'. Format should be x.y.z with {{x,y,z}} being digits.''' )
return tuple(int(_lowercase ) for v in [res.group("""major""" ), res.group("""minor""" ), res.group("""patch""" )] )
def __UpperCamelCase ( _lowerCAmelCase ) -> List[Any]:
"""simple docstring"""
return ".".join(str(_lowercase ) for v in version_tuple )
| 662 |
"""simple docstring"""
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ = {'vocab_file': 'vocab.txt'}
SCREAMING_SNAKE_CASE_ = {
'vocab_file': {
'openbmb/cpm-ant-10b': 'https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt',
},
}
SCREAMING_SNAKE_CASE_ = {
'openbmb/cpm-ant-10b': 1024,
}
def __snake_case ( _lowercase ):
"""simple docstring"""
UpperCamelCase = collections.OrderedDict()
with open(_lowercase ,'''r''' ,encoding='''utf-8''' ) as reader:
UpperCamelCase = reader.readlines()
for index, token in enumerate(_lowercase ):
UpperCamelCase = token.rstrip('''\n''' )
UpperCamelCase = index
return vocab
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
def __init__( self , lowerCamelCase_ , lowerCamelCase_="<unk>" , lowerCamelCase_=2_0_0) -> Any:
UpperCamelCase = vocab
UpperCamelCase = unk_token
UpperCamelCase = max_input_chars_per_word
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Union[str, Any]:
UpperCamelCase = list(lowerCamelCase_)
if len(lowerCamelCase_) > self.max_input_chars_per_word:
return [self.unk_token]
UpperCamelCase = 0
UpperCamelCase = []
while start < len(lowerCamelCase_):
UpperCamelCase = len(lowerCamelCase_)
UpperCamelCase = None
while start < end:
UpperCamelCase = ''''''.join(chars[start:end])
if substr in self.vocab:
UpperCamelCase = substr
break
end -= 1
if cur_substr is None:
sub_tokens.append(self.unk_token)
start += 1
else:
sub_tokens.append(lowerCamelCase_)
UpperCamelCase = end
return sub_tokens
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
A_ = VOCAB_FILES_NAMES
A_ = PRETRAINED_VOCAB_FILES_MAP
A_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A_ = ['''input_ids''', '''attention_mask''']
A_ = False
def __init__( self , lowerCamelCase_ , lowerCamelCase_="<d>" , lowerCamelCase_="</d>" , lowerCamelCase_="<s>" , lowerCamelCase_="</s>" , lowerCamelCase_="<pad>" , lowerCamelCase_="<unk>" , lowerCamelCase_="</n>" , lowerCamelCase_="</_>" , lowerCamelCase_="left" , **lowerCamelCase_ , ) -> List[str]:
requires_backends(self , ['''jieba'''])
super().__init__(
bod_token=lowerCamelCase_ , eod_token=lowerCamelCase_ , bos_token=lowerCamelCase_ , eos_token=lowerCamelCase_ , pad_token=lowerCamelCase_ , unk_token=lowerCamelCase_ , line_token=lowerCamelCase_ , space_token=lowerCamelCase_ , padding_side=lowerCamelCase_ , **lowerCamelCase_ , )
UpperCamelCase = bod_token
UpperCamelCase = eod_token
UpperCamelCase = load_vocab(lowerCamelCase_)
UpperCamelCase = self.encoder[space_token]
UpperCamelCase = self.encoder[line_token]
del self.encoder[space_token]
del self.encoder[line_token]
UpperCamelCase = collections.OrderedDict(sorted(self.encoder.items() , key=lambda lowerCamelCase_: x[1]))
UpperCamelCase = {v: k for k, v in self.encoder.items()}
UpperCamelCase = WordpieceTokenizer(vocab=self.encoder , unk_token=self.unk_token)
@property
def UpperCAmelCase__ ( self) -> Dict:
return self.encoder[self.bod_token]
@property
def UpperCAmelCase__ ( self) -> str:
return self.encoder[self.eod_token]
@property
def UpperCAmelCase__ ( self) -> List[Any]:
return self.encoder["\n"]
@property
def UpperCAmelCase__ ( self) -> int:
return len(self.encoder)
def UpperCAmelCase__ ( self) -> Dict:
return dict(self.encoder , **self.added_tokens_encoder)
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Any:
UpperCamelCase = []
for x in jieba.cut(lowerCamelCase_ , cut_all=lowerCamelCase_):
output_tokens.extend(self.wordpiece_tokenizer.tokenize(lowerCamelCase_))
return output_tokens
def UpperCAmelCase__ ( self , lowerCamelCase_ , **lowerCamelCase_) -> Tuple:
UpperCamelCase = [i for i in token_ids if i >= 0]
UpperCamelCase = [
x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
]
return super()._decode(lowerCamelCase_ , **lowerCamelCase_)
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Dict:
return token in self.encoder
def UpperCAmelCase__ ( self , lowerCamelCase_) -> str:
return "".join(lowerCamelCase_)
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Optional[int]:
return self.encoder.get(lowerCamelCase_ , self.encoder.get(self.unk_token))
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Dict:
return self.decoder.get(lowerCamelCase_ , self.unk_token)
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ = None) -> Tuple[str]:
if os.path.isdir(lowerCamelCase_):
UpperCamelCase = os.path.join(
lowerCamelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''])
else:
UpperCamelCase = (filename_prefix + '''-''' if filename_prefix else '''''') + save_directory
UpperCamelCase = 0
if " " in self.encoder:
UpperCamelCase = self.encoder[''' ''']
del self.encoder[" "]
if "\n" in self.encoder:
UpperCamelCase = self.encoder['''\n''']
del self.encoder["\n"]
UpperCamelCase = collections.OrderedDict(sorted(self.encoder.items() , key=lambda lowerCamelCase_: x[1]))
with open(lowerCamelCase_ , '''w''' , encoding='''utf-8''') as writer:
for token, token_index in self.encoder.items():
if index != token_index:
logger.warning(
F'Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.'
''' Please check that the vocabulary is not corrupted!''')
UpperCamelCase = token_index
writer.write(token + '''\n''')
index += 1
return (vocab_file,)
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ = None) -> List[int]:
if token_ids_a is None:
return [self.bos_token_id] + token_ids_a
return [self.bos_token_id] + token_ids_a + [self.bos_token_id] + token_ids_a
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ = None , lowerCamelCase_ = False) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCamelCase_ , token_ids_a=lowerCamelCase_ , already_has_special_tokens=lowerCamelCase_)
if token_ids_a is not None:
return [1] + ([0] * len(lowerCamelCase_)) + [1] + ([0] * len(lowerCamelCase_))
return [1] + ([0] * len(lowerCamelCase_)) | 34 | 0 |
import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
snake_case__ : Tuple = {
"""cola""": 2,
"""mnli""": 3,
"""mrpc""": 2,
"""sst-2""": 2,
"""sts-b""": 1,
"""qqp""": 2,
"""qnli""": 2,
"""rte""": 2,
"""wnli""": 2,
}
logging.set_verbosity_info()
def _snake_case (__lowercase , __lowercase , __lowercase , __lowercase=None):
UpperCamelCase_ = XLNetConfig.from_json_file(_lowercase)
UpperCamelCase_ = finetuning_task.lower() if finetuning_task is not None else ''
if finetuning_task in GLUE_TASKS_NUM_LABELS:
print(f"""Building PyTorch XLNetForSequenceClassification model from configuration: {config}""")
UpperCamelCase_ = finetuning_task
UpperCamelCase_ = GLUE_TASKS_NUM_LABELS[finetuning_task]
UpperCamelCase_ = XLNetForSequenceClassification(_lowercase)
elif "squad" in finetuning_task:
UpperCamelCase_ = finetuning_task
UpperCamelCase_ = XLNetForQuestionAnswering(_lowercase)
else:
UpperCamelCase_ = XLNetLMHeadModel(_lowercase)
# Load weights from tf checkpoint
load_tf_weights_in_xlnet(_lowercase , _lowercase , _lowercase)
# Save pytorch-model
UpperCamelCase_ = os.path.join(_lowercase , _lowercase)
UpperCamelCase_ = os.path.join(_lowercase , _lowercase)
print(f"""Save PyTorch model to {os.path.abspath(_lowercase)}""")
torch.save(model.state_dict() , _lowercase)
print(f"""Save configuration file to {os.path.abspath(_lowercase)}""")
with open(_lowercase , 'w' , encoding='utf-8') as f:
f.write(config.to_json_string())
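# Hypothetical invocation via the argparse CLI below (all paths are placeholders):
#   python convert_xlnet_checkpoint.py \
#       --tf_checkpoint_path ./xlnet_cased/model.ckpt \
#       --xlnet_config_file ./xlnet_cased/xlnet_config.json \
#       --pytorch_dump_folder_path ./xlnet-pytorch \
#       --finetuning_task sts-b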
if __name__ == "__main__":
snake_case__ : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--xlnet_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained XLNet model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=str,
required=True,
help="""Path to the folder to store the PyTorch model or dataset/vocab.""",
)
parser.add_argument(
"""--finetuning_task""",
default=None,
type=str,
help="""Name of a task on which the XLNet TensorFlow model was fine-tuned""",
)
snake_case__ : Dict = parser.parse_args()
print(args)
convert_xlnet_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
)
| 23 |
"""simple docstring"""
from typing import Callable, Dict, Optional, Tuple
import torch
from torch import nn
from torch.distributions import (
AffineTransform,
Distribution,
Independent,
NegativeBinomial,
Normal,
StudentT,
TransformedDistribution,
)
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
def __init__( self , lowerCamelCase_ , lowerCamelCase_=None , lowerCamelCase_=None , lowerCamelCase_=0) -> int:
UpperCamelCase = 1.0 if scale is None else scale
UpperCamelCase = 0.0 if loc is None else loc
super().__init__(lowerCamelCase_ , [AffineTransform(loc=self.loc , scale=self.scale , event_dim=lowerCamelCase_)])
@property
def UpperCAmelCase__ ( self) -> List[Any]:
return self.base_dist.mean * self.scale + self.loc
@property
def UpperCAmelCase__ ( self) -> List[str]:
return self.base_dist.variance * self.scale**2
@property
def UpperCAmelCase__ ( self) -> Any:
return self.variance.sqrt()
class snake_case_ ( nn.Module ):
"""simple docstring"""
def __init__( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , **lowerCamelCase_) -> None:
super().__init__(**lowerCamelCase_)
UpperCamelCase = args_dim
UpperCamelCase = nn.ModuleList([nn.Linear(lowerCamelCase_ , lowerCamelCase_) for dim in args_dim.values()])
UpperCamelCase = domain_map
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Tuple[torch.Tensor]:
UpperCamelCase = [proj(lowerCamelCase_) for proj in self.proj]
return self.domain_map(*lowerCamelCase_)
class snake_case_ ( nn.Module ):
"""simple docstring"""
def __init__( self , lowerCamelCase_) -> int:
super().__init__()
UpperCamelCase = function
def UpperCAmelCase__ ( self , lowerCamelCase_ , *lowerCamelCase_) -> Tuple:
return self.function(lowerCamelCase_ , *lowerCamelCase_)
class snake_case_ :
"""simple docstring"""
A_ = 42
A_ = 42
A_ = 42
def __init__( self , lowerCamelCase_ = 1) -> None:
UpperCamelCase = dim
UpperCamelCase = {k: dim * self.args_dim[k] for k in self.args_dim}
def UpperCAmelCase__ ( self , lowerCamelCase_) -> str:
if self.dim == 1:
return self.distribution_class(*lowerCamelCase_)
else:
return Independent(self.distribution_class(*lowerCamelCase_) , 1)
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ = None , lowerCamelCase_ = None , ) -> Distribution:
UpperCamelCase = self._base_distribution(lowerCamelCase_)
if loc is None and scale is None:
return distr
else:
return AffineTransformed(lowerCamelCase_ , loc=lowerCamelCase_ , scale=lowerCamelCase_ , event_dim=self.event_dim)
@property
def UpperCAmelCase__ ( self) -> Tuple:
return () if self.dim == 1 else (self.dim,)
@property
def UpperCAmelCase__ ( self) -> int:
return len(self.event_shape)
@property
def UpperCAmelCase__ ( self) -> float:
return 0.0
def UpperCAmelCase__ ( self , lowerCamelCase_) -> nn.Module:
return ParameterProjection(
in_features=lowerCamelCase_ , args_dim=self.args_dim , domain_map=LambdaLayer(self.domain_map) , )
def UpperCAmelCase__ ( self , *lowerCamelCase_) -> List[str]:
raise NotImplementedError()
@staticmethod
def UpperCAmelCase__ ( lowerCamelCase_) -> torch.Tensor:
return (x + torch.sqrt(torch.square(lowerCamelCase_) + 4.0)) / 2.0
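# Sanity check of the squareplus mapping above (values rounded): an input of
# tensor([-4.0, 0.0, 4.0]) maps to roughly tensor([0.2361, 1.0000, 4.2361]), i.e.
# a smooth, strictly positive surrogate for ReLU used by the domain maps below.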
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
A_ = {"df": 1, "loc": 1, "scale": 1}
A_ = StudentT
@classmethod
def UpperCAmelCase__ ( cls , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> Optional[int]:
UpperCamelCase = cls.squareplus(lowerCamelCase_).clamp_min(torch.finfo(scale.dtype).eps)
UpperCamelCase = 2.0 + cls.squareplus(lowerCamelCase_)
return df.squeeze(-1), loc.squeeze(-1), scale.squeeze(-1)
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
A_ = {"loc": 1, "scale": 1}
A_ = Normal
@classmethod
def UpperCAmelCase__ ( cls , lowerCamelCase_ , lowerCamelCase_) -> str:
UpperCamelCase = cls.squareplus(lowerCamelCase_).clamp_min(torch.finfo(scale.dtype).eps)
return loc.squeeze(-1), scale.squeeze(-1)
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
A_ = {"total_count": 1, "logits": 1}
A_ = NegativeBinomial
@classmethod
def UpperCAmelCase__ ( cls , lowerCamelCase_ , lowerCamelCase_) -> List[Any]:
UpperCamelCase = cls.squareplus(lowerCamelCase_)
return total_count.squeeze(-1), logits.squeeze(-1)
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Distribution:
UpperCamelCase , UpperCamelCase = distr_args
if self.dim == 1:
return self.distribution_class(total_count=lowerCamelCase_ , logits=lowerCamelCase_)
else:
return Independent(self.distribution_class(total_count=lowerCamelCase_ , logits=lowerCamelCase_) , 1)
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ = None , lowerCamelCase_ = None) -> Distribution:
UpperCamelCase , UpperCamelCase = distr_args
if scale is not None:
# See scaling property of Gamma.
logits += scale.log()
return self._base_distribution((total_count, logits)) | 34 | 0 |
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
lowercase : Any = logging.get_logger(__name__)
def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : List[str] , _lowerCamelCase : Any , _lowerCamelCase : Tuple , _lowerCamelCase : Any) -> Tuple:
'''simple docstring'''
def constraint_to_multiple_of(_lowerCamelCase : List[str] , _lowerCamelCase : str , _lowerCamelCase : Optional[int]=0 , _lowerCamelCase : Any=None):
__UpperCamelCase : Union[str, Any] = round(val / multiple) * multiple
if max_val is not None and x > max_val:
__UpperCamelCase : Any = math.floor(val / multiple) * multiple
if x < min_val:
__UpperCamelCase : List[str] = math.ceil(val / multiple) * multiple
return x
__UpperCamelCase : Optional[int] = (output_size, output_size) if isinstance(_lowercase , _lowercase) else output_size
__UpperCamelCase , __UpperCamelCase : Optional[int] = get_image_size(_lowercase)
__UpperCamelCase , __UpperCamelCase : List[Any] = output_size
# determine new height and width
__UpperCamelCase : List[str] = output_height / input_height
__UpperCamelCase : Any = output_width / input_width
if keep_aspect_ratio:
# scale as little as possible
if abs(1 - scale_width) < abs(1 - scale_height):
# fit width
__UpperCamelCase : Any = scale_width
else:
# fit height
__UpperCamelCase : Union[str, Any] = scale_height
__UpperCamelCase : str = constraint_to_multiple_of(scale_height * input_height , multiple=_lowercase)
__UpperCamelCase : int = constraint_to_multiple_of(scale_width * input_width , multiple=_lowercase)
return (new_height, new_width)
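# Worked example for the helper above: a 480x640 input with output_size=384,
# keep_aspect_ratio=True and multiple=32 keeps the height scale (0.8) and
# returns (384, 512).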
class lowerCamelCase__ ( lowerCamelCase_):
'''simple docstring'''
_A = ['pixel_values']
def __init__( self :List[Any] , a :Optional[Any] = True , a :Optional[Any] = None , a :int = PILImageResampling.BILINEAR , a :List[str] = False , a :Union[str, Any] = 1 , a :Optional[Any] = True , a :str = 1 / 2_5_5 , a :str = True , a :Tuple = None , a :Any = None , **a :List[str] , ) -> None:
super().__init__(**lowerCamelCase_ )
__UpperCamelCase : Tuple = size if size is not None else {"height": 3_8_4, "width": 3_8_4}
__UpperCamelCase : Optional[int] = get_size_dict(lowerCamelCase_ )
__UpperCamelCase : Tuple = do_resize
__UpperCamelCase : List[str] = size
__UpperCamelCase : Optional[Any] = keep_aspect_ratio
__UpperCamelCase : int = ensure_multiple_of
__UpperCamelCase : Any = resample
__UpperCamelCase : List[str] = do_rescale
__UpperCamelCase : List[Any] = rescale_factor
__UpperCamelCase : Any = do_normalize
__UpperCamelCase : Tuple = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
__UpperCamelCase : Tuple = image_std if image_std is not None else IMAGENET_STANDARD_STD
def _lowerCamelCase ( self :Optional[int] , a :Any , a :Union[str, Any] , a :Tuple = False , a :Any = 1 , a :Dict = PILImageResampling.BICUBIC , a :str = None , **a :Optional[int] , ) -> np.ndarray:
__UpperCamelCase : Any = get_size_dict(lowerCamelCase_ )
if "height" not in size or "width" not in size:
raise ValueError(f'The size dictionary must contain the keys \'height\' and \'width\'. Got {size.keys()}' )
__UpperCamelCase : List[str] = get_resize_output_image_size(
lowerCamelCase_ , output_size=(size["height"], size["width"]) , keep_aspect_ratio=lowerCamelCase_ , multiple=lowerCamelCase_ , )
return resize(lowerCamelCase_ , size=lowerCamelCase_ , resample=lowerCamelCase_ , data_format=lowerCamelCase_ , **lowerCamelCase_ )
def _lowerCamelCase ( self :Any , a :Union[str, Any] , a :Any , a :Any = None , **a :List[Any] , ) -> Any:
return rescale(lowerCamelCase_ , scale=lowerCamelCase_ , data_format=lowerCamelCase_ , **lowerCamelCase_ )
def _lowerCamelCase ( self :Optional[int] , a :Dict , a :Optional[Any] , a :Tuple , a :List[str] = None , **a :Optional[Any] , ) -> np.ndarray:
return normalize(lowerCamelCase_ , mean=lowerCamelCase_ , std=lowerCamelCase_ , data_format=lowerCamelCase_ , **lowerCamelCase_ )
def _lowerCamelCase ( self :List[Any] , a :Optional[Any] , a :Optional[Any] = None , a :Optional[int] = None , a :Optional[int] = None , a :Tuple = None , a :Optional[int] = None , a :Optional[int] = None , a :List[Any] = None , a :Optional[Any] = None , a :List[str] = None , a :str = None , a :Union[str, Any] = None , a :Union[str, Any] = ChannelDimension.FIRST , **a :List[Any] , ) -> PIL.Image.Image:
__UpperCamelCase : List[str] = do_resize if do_resize is not None else self.do_resize
__UpperCamelCase : List[str] = size if size is not None else self.size
__UpperCamelCase : Optional[int] = get_size_dict(lowerCamelCase_ )
__UpperCamelCase : Any = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
__UpperCamelCase : Optional[Any] = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
__UpperCamelCase : str = resample if resample is not None else self.resample
__UpperCamelCase : Optional[int] = do_rescale if do_rescale is not None else self.do_rescale
__UpperCamelCase : Any = rescale_factor if rescale_factor is not None else self.rescale_factor
__UpperCamelCase : Any = do_normalize if do_normalize is not None else self.do_normalize
__UpperCamelCase : str = image_mean if image_mean is not None else self.image_mean
__UpperCamelCase : int = image_std if image_std is not None else self.image_std
__UpperCamelCase : List[str] = make_list_of_images(lowerCamelCase_ )
if not valid_images(lowerCamelCase_ ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None or resample is None:
raise ValueError("Size and resample must be specified if do_resize is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# All transformations expect numpy arrays.
__UpperCamelCase : str = [to_numpy_array(lowerCamelCase_ ) for image in images]
if do_resize:
__UpperCamelCase : Optional[int] = [self.resize(image=lowerCamelCase_ , size=lowerCamelCase_ , resample=lowerCamelCase_ ) for image in images]
if do_rescale:
__UpperCamelCase : str = [self.rescale(image=lowerCamelCase_ , scale=lowerCamelCase_ ) for image in images]
if do_normalize:
__UpperCamelCase : Optional[int] = [self.normalize(image=lowerCamelCase_ , mean=lowerCamelCase_ , std=lowerCamelCase_ ) for image in images]
__UpperCamelCase : Dict = [to_channel_dimension_format(lowerCamelCase_ , lowerCamelCase_ ) for image in images]
__UpperCamelCase : Dict = {"pixel_values": images}
return BatchFeature(data=lowerCamelCase_ , tensor_type=lowerCamelCase_ )
def _lowerCamelCase ( self :List[Any] , a :Any , a :str = None ) -> Optional[int]:
__UpperCamelCase : int = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(lowerCamelCase_ ) != len(lowerCamelCase_ ):
raise ValueError(
"Make sure that you pass in as many target sizes as the batch dimension of the logits" )
if is_torch_tensor(lowerCamelCase_ ):
__UpperCamelCase : Optional[Any] = target_sizes.numpy()
__UpperCamelCase : List[str] = []
for idx in range(len(lowerCamelCase_ ) ):
__UpperCamelCase : str = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode="bilinear" , align_corners=lowerCamelCase_ )
__UpperCamelCase : int = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(lowerCamelCase_ )
else:
__UpperCamelCase : str = logits.argmax(dim=1 )
__UpperCamelCase : Union[str, Any] = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation | 557 |
"""simple docstring"""
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
SCREAMING_SNAKE_CASE_ = abspath(join(dirname(dirname(__file__)), 'src'))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='ignore', category=FutureWarning)
def __snake_case ( _lowercase ):
"""simple docstring"""
from diffusers.utils.testing_utils import pytest_addoption_shared
pytest_addoption_shared(_lowercase )
def __snake_case ( _lowercase ):
"""simple docstring"""
from diffusers.utils.testing_utils import pytest_terminal_summary_main
UpperCamelCase = terminalreporter.config.getoption('''--make-reports''' )
if make_reports:
pytest_terminal_summary_main(_lowercase ,id=_lowercase ) | 34 | 0 |
from __future__ import annotations
class UpperCamelCase :
def __init__( self , UpperCAmelCase__ ):
A__ = data
A__ = None
A__ = None
def UpperCamelCase ( _A : List[Any] )-> int: # In Order traversal of the tree
"""simple docstring"""
if tree:
display(tree.left )
print(tree.data )
display(tree.right )
def UpperCamelCase ( _A : Dict )-> Any:
"""simple docstring"""
return 1 + max(depth_of_tree(tree.left ) , depth_of_tree(tree.right ) ) if tree else 0
def UpperCamelCase ( _A : str )-> str:
"""simple docstring"""
if not tree:
return True
if tree.left and tree.right:
return is_full_binary_tree(tree.left ) and is_full_binary_tree(tree.right )
else:
return not tree.left and not tree.right
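# For reference: a full binary tree is one in which every node has either zero or
# two children; the 9-node tree assembled in main() below is full and has depth 4.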
def UpperCamelCase ( )-> Union[str, Any]: # Main function for testing.
"""simple docstring"""
A__ = Node(1 )
A__ = Node(2 )
A__ = Node(3 )
A__ = Node(4 )
A__ = Node(5 )
A__ = Node(6 )
A__ = Node(7 )
A__ = Node(8 )
A__ = Node(9 )
print(is_full_binary_tree(_lowercase ) )
print(depth_of_tree(_lowercase ) )
print("Tree is: " )
display(_lowercase )
if __name__ == "__main__":
main()
| 491 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
def __init__( self , *lowerCamelCase_ , **lowerCamelCase_) -> None:
warnings.warn(
'''The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
''' Please use MobileViTImageProcessor instead.''' , lowerCamelCase_ , )
super().__init__(*lowerCamelCase_ , **lowerCamelCase_) | 34 | 0 |
from typing import Dict, List, Optional, Type
from .. import config
from ..utils import logging
from .formatting import (
ArrowFormatter,
CustomFormatter,
Formatter,
PandasFormatter,
PythonFormatter,
TensorFormatter,
format_table,
query_table,
)
from .np_formatter import NumpyFormatter
A_: List[Any] = logging.get_logger(__name__)
A_: Optional[int] = {}
A_: Tuple = {}
A_: Dict = {}
def __lowerCAmelCase ( _A ,_A ,_A = None ,):
"""simple docstring"""
_lowercase = aliases if aliases is not None else []
if format_type in _FORMAT_TYPES:
logger.warning(
f'''Overwriting format type \'{format_type}\' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})''' )
_lowercase = formatter_cls
for alias in set(aliases + [format_type] ):
if alias in _FORMAT_TYPES_ALIASES:
logger.warning(
f'''Overwriting format type alias \'{alias}\' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})''' )
_lowercase = format_type
def __lowerCAmelCase ( _A ,_A ,_A = None ):
"""simple docstring"""
_lowercase = aliases if aliases is not None else []
for alias in set(aliases + [format_type] ):
_lowercase = unavailable_error
# Here we define all the available formatting functions that can be used by `Dataset.set_format`
_register_formatter(PythonFormatter, None, aliases=['python'])
_register_formatter(ArrowFormatter, 'arrow', aliases=['pa', 'pyarrow'])
_register_formatter(NumpyFormatter, 'numpy', aliases=['np'])
_register_formatter(PandasFormatter, 'pandas', aliases=['pd'])
_register_formatter(CustomFormatter, 'custom')
if config.TORCH_AVAILABLE:
from .torch_formatter import TorchFormatter
_register_formatter(TorchFormatter, 'torch', aliases=['pt', 'pytorch'])
else:
A_: Any = ValueError('PyTorch needs to be installed to be able to return PyTorch tensors.')
_register_unavailable_formatter(_torch_error, 'torch', aliases=['pt', 'pytorch'])
if config.TF_AVAILABLE:
from .tf_formatter import TFFormatter
_register_formatter(TFFormatter, 'tensorflow', aliases=['tf'])
else:
A_: Optional[Any] = ValueError('Tensorflow needs to be installed to be able to return Tensorflow tensors.')
_register_unavailable_formatter(_tf_error, 'tensorflow', aliases=['tf'])
if config.JAX_AVAILABLE:
from .jax_formatter import JaxFormatter
_register_formatter(JaxFormatter, 'jax', aliases=[])
else:
A_: Optional[int] = ValueError('JAX needs to be installed to be able to return JAX arrays.')
_register_unavailable_formatter(_jax_error, 'jax', aliases=[])
def __lowerCAmelCase ( _A ):
"""simple docstring"""
if format_type in _FORMAT_TYPES_ALIASES:
return _FORMAT_TYPES_ALIASES[format_type]
else:
return format_type
def __lowerCAmelCase ( _A ,**_A ):
"""simple docstring"""
_lowercase = get_format_type_from_alias(_lowercase )
if format_type in _FORMAT_TYPES:
return _FORMAT_TYPES[format_type](**_lowercase )
if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE:
raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type]
else:
raise ValueError(
f'''Return type should be None or selected in {list(type for type in _FORMAT_TYPES.keys() if type != None )}, but got \'{format_type}\'''' )
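# e.g. get_formatter("np") resolves the "np" alias to "numpy" and returns a
# NumpyFormatter; when PyTorch is not installed, get_formatter("torch") raises the
# ValueError registered above instead.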
| 398 |
"""simple docstring"""
def __snake_case ( _lowercase ):
"""simple docstring"""
UpperCamelCase = [0 for i in range(len(_lowercase ) )]
# initialize interval's left pointer and right pointer
UpperCamelCase , UpperCamelCase = 0, 0
for i in range(1 ,len(_lowercase ) ):
# case when current index is inside the interval
if i <= right_pointer:
UpperCamelCase = min(right_pointer - i + 1 ,z_result[i - left_pointer] )
UpperCamelCase = min_edge
while go_next(_lowercase ,_lowercase ,_lowercase ):
z_result[i] += 1
# if new index's result gives us more right interval,
# we've to update left_pointer and right_pointer
if i + z_result[i] - 1 > right_pointer:
UpperCamelCase , UpperCamelCase = i, i + z_result[i] - 1
return z_result
def __snake_case ( _lowercase ,_lowercase ,_lowercase ):
"""simple docstring"""
return i + z_result[i] < len(_lowercase ) and s[z_result[i]] == s[i + z_result[i]]
def __snake_case ( _lowercase ,_lowercase ):
"""simple docstring"""
UpperCamelCase = 0
# concatenate 'pattern' and 'input_str' and call z_function
# with concatenated string
UpperCamelCase = z_function(pattern + input_str )
for val in z_result:
# if value is greater then length of the pattern string
# that means this index is starting position of substring
# which is equal to pattern string
if val >= len(_lowercase ):
answer += 1
return answer
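# e.g. find_pattern("abr", "abracadabra") == 2, since "abr" occurs at indices 0 and 7.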
if __name__ == "__main__":
import doctest
doctest.testmod() | 34 | 0 |
'''simple docstring'''
import hashlib
import unittest
from typing import Dict
import numpy as np
from transformers import (
MODEL_FOR_MASK_GENERATION_MAPPING,
TF_MODEL_FOR_MASK_GENERATION_MAPPING,
is_vision_available,
pipeline,
)
from transformers.pipelines import MaskGenerationPipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
if is_vision_available():
from PIL import Image
else:
class SCREAMING_SNAKE_CASE :
@staticmethod
def a_ ( *A__ : List[str] , **A__ : Optional[int] ):
"""simple docstring"""
pass
def __lowercase (_lowercase ) -> Optional[Any]:
"""simple docstring"""
    __lowerCamelCase : Optional[Any] = hashlib.md5(image.tobytes() )
return m.hexdigest()[:10]
def __lowercase (_lowercase ) -> Tuple:
"""simple docstring"""
__lowerCamelCase : List[Any] = np.array(_lowercase )
__lowerCamelCase : str = npimg.shape
return {"hash": hashimage(_lowercase ), "shape": shape}
@is_pipeline_test
@require_vision
@require_torch
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
snake_case__ : Optional[Any] = dict(
(list(MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if MODEL_FOR_MASK_GENERATION_MAPPING else []) )
snake_case__ : Any = dict(
(list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else []) )
def a_ ( self : Tuple , A__ : int , A__ : Tuple , A__ : List[str] ):
"""simple docstring"""
__lowerCamelCase : Tuple = MaskGenerationPipeline(model=lowerCamelCase_ , image_processor=lowerCamelCase_ )
return image_segmenter, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
def a_ ( self : int , A__ : Tuple , A__ : Any ):
"""simple docstring"""
pass
@require_tf
@unittest.skip("""Image segmentation not implemented in TF""" )
def a_ ( self : List[str] ):
"""simple docstring"""
pass
@slow
@require_torch
def a_ ( self : Tuple ):
"""simple docstring"""
__lowerCamelCase : Union[str, Any] = pipeline("""mask-generation""" , model="""facebook/sam-vit-huge""" )
__lowerCamelCase : Union[str, Any] = image_segmenter("""http://images.cocodataset.org/val2017/000000039769.jpg""" , points_per_batch=256 )
# Shortening by hashing
__lowerCamelCase : List[str] = []
for i, o in enumerate(outputs["""masks"""] ):
            new_output += [{"mask": mask_to_test_readable(lowerCamelCase_ ), "scores": outputs["scores"][i]}]
# fmt: off
self.assertEqual(
nested_simplify(lowerCamelCase_ , decimals=4 ) , [
{"""mask""": {"""hash""": """115ad19f5f""", """shape""": (480, 640)}, """scores""": 1.0444},
{"""mask""": {"""hash""": """6affa964c6""", """shape""": (480, 640)}, """scores""": 1.021},
{"""mask""": {"""hash""": """dfe28a0388""", """shape""": (480, 640)}, """scores""": 1.0167},
{"""mask""": {"""hash""": """c0a5f4a318""", """shape""": (480, 640)}, """scores""": 1.0132},
{"""mask""": {"""hash""": """fe8065c197""", """shape""": (480, 640)}, """scores""": 1.0053},
{"""mask""": {"""hash""": """e2d0b7a0b7""", """shape""": (480, 640)}, """scores""": 0.9967},
{"""mask""": {"""hash""": """453c7844bd""", """shape""": (480, 640)}, """scores""": 0.993},
{"""mask""": {"""hash""": """3d44f2926d""", """shape""": (480, 640)}, """scores""": 0.9909},
{"""mask""": {"""hash""": """64033ddc3f""", """shape""": (480, 640)}, """scores""": 0.9879},
{"""mask""": {"""hash""": """801064ff79""", """shape""": (480, 640)}, """scores""": 0.9834},
{"""mask""": {"""hash""": """6172f276ef""", """shape""": (480, 640)}, """scores""": 0.9716},
{"""mask""": {"""hash""": """b49e60e084""", """shape""": (480, 640)}, """scores""": 0.9612},
{"""mask""": {"""hash""": """a811e775fd""", """shape""": (480, 640)}, """scores""": 0.9599},
{"""mask""": {"""hash""": """a6a8ebcf4b""", """shape""": (480, 640)}, """scores""": 0.9552},
{"""mask""": {"""hash""": """9d8257e080""", """shape""": (480, 640)}, """scores""": 0.9532},
{"""mask""": {"""hash""": """32de6454a8""", """shape""": (480, 640)}, """scores""": 0.9516},
{"""mask""": {"""hash""": """af3d4af2c8""", """shape""": (480, 640)}, """scores""": 0.9499},
{"""mask""": {"""hash""": """3c6db475fb""", """shape""": (480, 640)}, """scores""": 0.9483},
{"""mask""": {"""hash""": """c290813fb9""", """shape""": (480, 640)}, """scores""": 0.9464},
{"""mask""": {"""hash""": """b6f0b8f606""", """shape""": (480, 640)}, """scores""": 0.943},
{"""mask""": {"""hash""": """92ce16bfdf""", """shape""": (480, 640)}, """scores""": 0.943},
{"""mask""": {"""hash""": """c749b25868""", """shape""": (480, 640)}, """scores""": 0.9408},
{"""mask""": {"""hash""": """efb6cab859""", """shape""": (480, 640)}, """scores""": 0.9335},
{"""mask""": {"""hash""": """1ff2eafb30""", """shape""": (480, 640)}, """scores""": 0.9326},
{"""mask""": {"""hash""": """788b798e24""", """shape""": (480, 640)}, """scores""": 0.9262},
{"""mask""": {"""hash""": """abea804f0e""", """shape""": (480, 640)}, """scores""": 0.8999},
{"""mask""": {"""hash""": """7b9e8ddb73""", """shape""": (480, 640)}, """scores""": 0.8986},
{"""mask""": {"""hash""": """cd24047c8a""", """shape""": (480, 640)}, """scores""": 0.8984},
{"""mask""": {"""hash""": """6943e6bcbd""", """shape""": (480, 640)}, """scores""": 0.8873},
{"""mask""": {"""hash""": """b5f47c9191""", """shape""": (480, 640)}, """scores""": 0.8871}
] , )
# fmt: on
@require_torch
@slow
def a_ ( self : Union[str, Any] ):
"""simple docstring"""
__lowerCamelCase : List[str] = """facebook/sam-vit-huge"""
__lowerCamelCase : Optional[int] = pipeline("""mask-generation""" , model=lowerCamelCase_ )
__lowerCamelCase : str = image_segmenter(
"""http://images.cocodataset.org/val2017/000000039769.jpg""" , pred_iou_thresh=1 , points_per_batch=256 )
# Shortening by hashing
__lowerCamelCase : Optional[Any] = []
for i, o in enumerate(outputs["""masks"""] ):
            new_output += [{"mask": mask_to_test_readable(lowerCamelCase_ ), "scores": outputs["scores"][i]}]
self.assertEqual(
nested_simplify(lowerCamelCase_ , decimals=4 ) , [
{"""mask""": {"""hash""": """115ad19f5f""", """shape""": (480, 640)}, """scores""": 1.0444},
{"""mask""": {"""hash""": """6affa964c6""", """shape""": (480, 640)}, """scores""": 1.0210},
{"""mask""": {"""hash""": """dfe28a0388""", """shape""": (480, 640)}, """scores""": 1.0167},
{"""mask""": {"""hash""": """c0a5f4a318""", """shape""": (480, 640)}, """scores""": 1.0132},
{"""mask""": {"""hash""": """fe8065c197""", """shape""": (480, 640)}, """scores""": 1.0053},
] , )
| 150 |
"""simple docstring"""
import importlib.metadata
import warnings
from copy import deepcopy
from packaging import version
from ..utils import logging
from .import_utils import is_accelerate_available, is_bitsandbytes_available
if is_bitsandbytes_available():
import bitsandbytes as bnb
import torch
import torch.nn as nn
    from ..pytorch_utils import Conv1D
if is_accelerate_available():
from accelerate import init_empty_weights
from accelerate.utils import find_tied_parameters
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
def __snake_case ( _lowercase ,_lowercase ,_lowercase ,_lowercase=None ,_lowercase=None ):
"""simple docstring"""
if "." in tensor_name:
UpperCamelCase = tensor_name.split('''.''' )
for split in splits[:-1]:
UpperCamelCase = getattr(_lowercase ,_lowercase )
if new_module is None:
raise ValueError(f'{module} has no attribute {split}.' )
UpperCamelCase = new_module
UpperCamelCase = splits[-1]
if tensor_name not in module._parameters and tensor_name not in module._buffers:
raise ValueError(f'{module} does not have a parameter or a buffer named {tensor_name}.' )
UpperCamelCase = tensor_name in module._buffers
UpperCamelCase = getattr(_lowercase ,_lowercase )
if old_value.device == torch.device('''meta''' ) and device not in ["meta", torch.device('''meta''' )] and value is None:
raise ValueError(f'{tensor_name} is on the meta device, we need a `value` to put in on {device}.' )
UpperCamelCase = False
UpperCamelCase = False
if is_buffer or not is_bitsandbytes_available():
UpperCamelCase = False
UpperCamelCase = False
else:
        UpperCamelCase = hasattr(bnb.nn ,'''Params4bit''' ) and isinstance(module._parameters[tensor_name] ,bnb.nn.Params4bit )
        UpperCamelCase = isinstance(module._parameters[tensor_name] ,bnb.nn.Int8Params )
if is_abit or is_abit:
UpperCamelCase = module._parameters[tensor_name]
if param.device.type != "cuda":
if value is None:
UpperCamelCase = old_value.to(_lowercase )
elif isinstance(_lowercase ,torch.Tensor ):
UpperCamelCase = value.to('''cpu''' )
                if value.dtype == torch.int8:
UpperCamelCase = version.parse(importlib.metadata.version('''bitsandbytes''' ) ) > version.parse(
'''0.37.2''' )
                        if not is_8bit_serializable:
raise ValueError(
'''Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. '''
'''Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`.''' )
else:
UpperCamelCase = torch.tensor(_lowercase ,device='''cpu''' )
# Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization.
# Since weights are saved in the correct "orientation", we skip transposing when loading.
                if issubclass(module.source_cls ,_lowercase ) and fp16_statistics is None:
UpperCamelCase = new_value.T
UpperCamelCase = old_value.__dict__
            if is_abit:
                UpperCamelCase = bnb.nn.Int8Params(_lowercase ,requires_grad=_lowercase ,**_lowercase ).to(_lowercase )
            elif is_abit:
                UpperCamelCase = bnb.nn.Params4bit(_lowercase ,requires_grad=_lowercase ,**_lowercase ).to(_lowercase )
            UpperCamelCase = new_value
            if fp16_statistics is not None:
                setattr(module.weight ,'''SCB''' ,fp16_statistics.to(_lowercase ) )
else:
if value is None:
UpperCamelCase = old_value.to(_lowercase )
elif isinstance(_lowercase ,torch.Tensor ):
UpperCamelCase = value.to(_lowercase )
else:
UpperCamelCase = torch.tensor(_lowercase ,device=_lowercase )
if is_buffer:
UpperCamelCase = new_value
else:
UpperCamelCase = nn.Parameter(_lowercase ,requires_grad=old_value.requires_grad )
UpperCamelCase = new_value
def __snake_case ( _lowercase ,_lowercase=None ,_lowercase=None ,_lowercase=None ,_lowercase=False ):
"""simple docstring"""
for name, module in model.named_children():
if current_key_name is None:
UpperCamelCase = []
current_key_name.append(_lowercase )
if (isinstance(_lowercase ,nn.Linear ) or isinstance(_lowercase ,_lowercase )) and name not in modules_to_not_convert:
# Check if the current key is not in the `modules_to_not_convert`
if not any(key in '''.'''.join(_lowercase ) for key in modules_to_not_convert ):
with init_empty_weights():
if isinstance(_lowercase ,_lowercase ):
UpperCamelCase , UpperCamelCase = module.weight.shape
else:
UpperCamelCase = module.in_features
UpperCamelCase = module.out_features
if quantization_config.quantization_method() == "llm_int8":
                    UpperCamelCase = bnb.nn.Linear8bitLt(
                        _lowercase ,_lowercase ,module.bias is not None ,has_fp16_weights=quantization_config.llm_int8_has_fp16_weight ,threshold=quantization_config.llm_int8_threshold ,)
UpperCamelCase = True
else:
if (
                        quantization_config.llm_int8_skip_modules is not None
                        and name in quantization_config.llm_int8_skip_modules
):
pass
else:
                        UpperCamelCase = bnb.nn.Linear4bit(
                            _lowercase ,_lowercase ,module.bias is not None ,quantization_config.bnb_4bit_compute_dtype ,compress_statistics=quantization_config.bnb_4bit_use_double_quant ,quant_type=quantization_config.bnb_4bit_quant_type ,)
UpperCamelCase = True
# Store the module class in case we need to transpose the weight later
UpperCamelCase = type(_lowercase )
# Force requires grad to False to avoid unexpected errors
model._modules[name].requires_grad_(_lowercase )
if len(list(module.children() ) ) > 0:
UpperCamelCase , UpperCamelCase = _replace_with_bnb_linear(
_lowercase ,_lowercase ,_lowercase ,_lowercase ,has_been_replaced=_lowercase ,)
# Remove the last key for recursion
current_key_name.pop(-1 )
return model, has_been_replaced
def __snake_case ( _lowercase ,_lowercase=None ,_lowercase=None ,_lowercase=None ):
"""simple docstring"""
UpperCamelCase = ['''lm_head'''] if modules_to_not_convert is None else modules_to_not_convert
UpperCamelCase , UpperCamelCase = _replace_with_bnb_linear(
_lowercase ,_lowercase ,_lowercase ,_lowercase )
if not has_been_replaced:
logger.warning(
'''You are loading your model in 8bit or 4bit but no linear modules were found in your model.'''
''' Please double check your model architecture, or submit an issue on github if you think this is'''
''' a bug.''' )
return model
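# Minimal usage sketch, assuming a BitsAndBytesConfig built elsewhere (e.g. with
# load_in_8bit=True):
#   model = replace_with_bnb_linear(model, quantization_config=bnb_config)
# Every eligible nn.Linear outside `modules_to_not_convert` is swapped for a
# bnb.nn.Linear8bitLt created under init_empty_weights.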
def __snake_case ( *_lowercase ,**_lowercase ):
"""simple docstring"""
warnings.warn(
'''`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead''' ,_lowercase ,)
return replace_with_bnb_linear(*_lowercase ,**_lowercase )
def __snake_case ( *_lowercase ,**_lowercase ):
"""simple docstring"""
warnings.warn(
'''`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use `set_module_quantized_tensor_to_device` instead''' ,_lowercase ,)
return set_module_quantized_tensor_to_device(*_lowercase ,**_lowercase )
def __snake_case ( _lowercase ):
"""simple docstring"""
UpperCamelCase = deepcopy(_lowercase ) # this has 0 cost since it is done inside `init_empty_weights` context manager`
tied_model.tie_weights()
UpperCamelCase = find_tied_parameters(_lowercase )
# For compatibility with Accelerate < 0.18
if isinstance(_lowercase ,_lowercase ):
UpperCamelCase = sum(list(tied_params.values() ) ,[] ) + list(tied_params.keys() )
else:
UpperCamelCase = sum(_lowercase ,[] )
UpperCamelCase = len(_lowercase ) > 0
# Check if it is a base model
UpperCamelCase = not hasattr(_lowercase ,model.base_model_prefix )
# Ignore this for base models (BertModel, GPT2Model, etc.)
if (not has_tied_params) and is_base_model:
return []
# otherwise they have an attached head
UpperCamelCase = list(model.named_children() )
UpperCamelCase = [list_modules[-1][0]]
# add last module together with tied weights
UpperCamelCase = set(_lowercase ) - set(_lowercase )
UpperCamelCase = list(set(_lowercase ) ) + list(_lowercase )
# remove ".weight" from the keys
UpperCamelCase = ['''.weight''', '''.bias''']
UpperCamelCase = []
for name in list_untouched:
for name_to_remove in names_to_remove:
if name_to_remove in name:
UpperCamelCase = name.replace(_lowercase ,'''''' )
filtered_module_names.append(_lowercase )
return filtered_module_names | 34 | 0 |
'''simple docstring'''
from typing import Union
import fire
import torch
from tqdm import tqdm
def UpperCAmelCase ( UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : List[Any] = "cpu" , UpperCAmelCase__ : Dict = None):
lowerCamelCase : List[Any] = torch.load(_lowercase , map_location=_lowercase)
for k, v in tqdm(state_dict.items()):
if not isinstance(_lowercase , torch.Tensor):
raise TypeError('FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin')
lowerCamelCase : List[Any] = v.half()
if save_path is None: # overwrite src_path
lowerCamelCase : Tuple = src_path
torch.save(_lowercase , _lowercase)
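# fire exposes `convert` as a CLI; a hypothetical call (paths are placeholders):
#   python convert_to_fp16.py pytorch_model.bin --save_path pytorch_model.fp16.bin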
if __name__ == "__main__":
fire.Fire(convert)
| 320 |
"""simple docstring"""
from random import randint
from tempfile import TemporaryFile
import numpy as np
def __snake_case ( _lowercase ,_lowercase ,_lowercase ):
"""simple docstring"""
UpperCamelCase = 0
if start < end:
UpperCamelCase = randint(_lowercase ,_lowercase )
UpperCamelCase = a[end]
UpperCamelCase = a[pivot]
UpperCamelCase = temp
UpperCamelCase , UpperCamelCase = _in_place_partition(_lowercase ,_lowercase ,_lowercase )
count += _in_place_quick_sort(_lowercase ,_lowercase ,p - 1 )
count += _in_place_quick_sort(_lowercase ,p + 1 ,_lowercase )
return count
def __snake_case ( _lowercase ,_lowercase ,_lowercase ):
"""simple docstring"""
UpperCamelCase = 0
UpperCamelCase = randint(_lowercase ,_lowercase )
UpperCamelCase = a[end]
UpperCamelCase = a[pivot]
UpperCamelCase = temp
UpperCamelCase = start - 1
for index in range(_lowercase ,_lowercase ):
count += 1
if a[index] < a[end]: # check if current val is less than pivot value
UpperCamelCase = new_pivot_index + 1
UpperCamelCase = a[new_pivot_index]
UpperCamelCase = a[index]
UpperCamelCase = temp
UpperCamelCase = a[new_pivot_index + 1]
UpperCamelCase = a[end]
UpperCamelCase = temp
return new_pivot_index + 1, count
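# Usage sketch: _in_place_quick_sort(arr, 0, len(arr) - 1) sorts `arr` in place and
# returns the number of comparisons performed; the count varies between runs
# because the pivot is chosen at random.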
SCREAMING_SNAKE_CASE_ = TemporaryFile()
SCREAMING_SNAKE_CASE_ = 100 # 100 elements are to be sorted
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = 0, 1 # mean and standard deviation
SCREAMING_SNAKE_CASE_ = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print('The array is')
print(X)
outfile.seek(0) # using the same array
SCREAMING_SNAKE_CASE_ = np.load(outfile)
SCREAMING_SNAKE_CASE_ = len(M) - 1
SCREAMING_SNAKE_CASE_ = _in_place_quick_sort(M, 0, r)
print(
    'No of Comparisons for 100 elements selected from a standard normal distribution '
'is :'
)
print(z) | 34 | 0 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase_ : str = logging.get_logger(__name__)
def A_ ( _lowerCAmelCase : str ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = "huggingface/label-files"
_lowerCamelCase : Optional[int] = "imagenet-1k-id2label.json"
_lowerCamelCase : List[str] = json.load(open(hf_hub_download(_lowercase , _lowercase , repo_type="dataset" ) , "r" ) )
    _lowerCamelCase : str = {int(k ): v for k, v in idalabel.items()}
_lowerCamelCase : Optional[int] = {v: k for k, v in idalabel.items()}
_lowerCamelCase : List[str] = "std_conv" if "bit" in model_name else False
# note that when using BiT as backbone for ViT-hybrid checkpoints,
# one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
# config.conv_layer = "std_conv_same"
_lowerCamelCase : Optional[int] = BitConfig(
        conv_layer=_lowercase , num_labels=1000 , id2label=_lowercase , label2id=_lowercase , )
return config
def A_ ( _lowerCAmelCase : str ):
"""simple docstring"""
if "stem.conv" in name:
_lowerCamelCase : List[Any] = name.replace("stem.conv" , "bit.embedder.convolution" )
if "blocks" in name:
_lowerCamelCase : List[str] = name.replace("blocks" , "layers" )
if "head.fc" in name:
_lowerCamelCase : Tuple = name.replace("head.fc" , "classifier.1" )
if name.startswith("norm" ):
_lowerCamelCase : str = "bit." + name
if "bit" not in name and "classifier" not in name:
_lowerCamelCase : Optional[Any] = "bit.encoder." + name
return name
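# e.g. the renaming above maps "blocks.0.attn.proj.weight" to
# "bit.encoder.layers.0.attention.output.dense.weight".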
def A_ ( ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = "http://images.cocodataset.org/val2017/000000039769.jpg"
_lowerCamelCase : Union[str, Any] = Image.open(requests.get(_lowercase , stream=_lowercase ).raw )
return im
@torch.no_grad()
def A_ ( _lowerCAmelCase : Dict , _lowerCAmelCase : Tuple , _lowerCAmelCase : List[Any]=False ):
"""simple docstring"""
_lowerCamelCase : int = get_config(_lowercase )
# load original model from timm
_lowerCamelCase : Tuple = create_model(_lowercase , pretrained=_lowercase )
timm_model.eval()
# load state_dict of original model
_lowerCamelCase : Optional[Any] = timm_model.state_dict()
for key in state_dict.copy().keys():
_lowerCamelCase : Optional[Any] = state_dict.pop(_lowercase )
_lowerCamelCase : Dict = val.squeeze() if "head" in key else val
# load HuggingFace model
_lowerCamelCase : Optional[Any] = BitForImageClassification(_lowercase )
model.eval()
model.load_state_dict(_lowercase )
# create image processor
_lowerCamelCase : Optional[int] = create_transform(**resolve_data_config({} , model=_lowercase ) )
_lowerCamelCase : Optional[int] = transform.transforms
_lowerCamelCase : Optional[Any] = {
"bilinear": PILImageResampling.BILINEAR,
"bicubic": PILImageResampling.BICUBIC,
"nearest": PILImageResampling.NEAREST,
}
_lowerCamelCase : str = BitImageProcessor(
do_resize=_lowercase , size={"shortest_edge": timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=_lowercase , crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]} , do_normalize=_lowercase , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , )
_lowerCamelCase : List[Any] = prepare_img()
_lowerCamelCase : Dict = transform(_lowercase ).unsqueeze(0 )
_lowerCamelCase : str = processor(_lowercase , return_tensors="pt" ).pixel_values
# verify pixel values
assert torch.allclose(_lowercase , _lowercase )
# verify logits
with torch.no_grad():
_lowerCamelCase : Union[str, Any] = model(_lowercase )
_lowerCamelCase : str = outputs.logits
print("Logits:" , logits[0, :3] )
print("Predicted class:" , model.config.idalabel[logits.argmax(-1 ).item()] )
_lowerCamelCase : Dict = timm_model(_lowercase )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(_lowercase , outputs.logits , atol=1E-3 )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
Path(_lowercase ).mkdir(exist_ok=_lowercase )
print(F'Saving model {model_name} and processor to {pytorch_dump_folder_path}' )
model.save_pretrained(_lowercase )
processor.save_pretrained(_lowercase )
if push_to_hub:
print(F'Pushing model {model_name} and processor to the hub' )
model.push_to_hub(F'ybelkada/{model_name}' )
processor.push_to_hub(F'ybelkada/{model_name}' )
if __name__ == "__main__":
UpperCAmelCase_ : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='resnetv2_50x1_bitm',
type=str,
help='Name of the BiT timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model to the hub.',
)
UpperCAmelCase_ : Dict = parser.parse_args()
convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub) | 44 |
"""simple docstring"""
import os
import sys
import unittest
SCREAMING_SNAKE_CASE_ = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
SCREAMING_SNAKE_CASE_ = os.path.join(git_repo_path, 'src', 'transformers')
SCREAMING_SNAKE_CASE_ = '\n{0} = None\n'
SCREAMING_SNAKE_CASE_ = '\nclass {0}(metaclass=DummyObject):\n _backends = {1}\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, {1})\n'
SCREAMING_SNAKE_CASE_ = '\ndef {0}(*args, **kwargs):\n requires_backends({0}, {1})\n'
class snake_case_ ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase__ ( self) -> List[Any]:
UpperCamelCase = find_backend(''' _import_structure["models.albert"].append("AlbertTokenizerFast")''')
self.assertIsNone(lowerCamelCase_)
UpperCamelCase = find_backend(''' if not is_tokenizers_available():''')
self.assertEqual(lowerCamelCase_ , '''tokenizers''')
UpperCamelCase = find_backend(''' if not is_tensorflow_text_available():''')
self.assertEqual(lowerCamelCase_ , '''tensorflow_text''')
UpperCamelCase = find_backend(''' if not (is_sentencepiece_available() and is_tokenizers_available()):''')
self.assertEqual(lowerCamelCase_ , '''sentencepiece_and_tokenizers''')
UpperCamelCase = find_backend(
''' if not (is_sentencepiece_available() and is_tensorflow_text_available()):''')
self.assertEqual(lowerCamelCase_ , '''sentencepiece_and_tensorflow_text''')
UpperCamelCase = find_backend(
''' if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):''')
self.assertEqual(lowerCamelCase_ , '''sentencepiece_and_tokenizers_and_vision''')
def UpperCAmelCase__ ( self) -> int:
UpperCamelCase = read_init()
# We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
self.assertIn('''torch''' , lowerCamelCase_)
self.assertIn('''tensorflow_text''' , lowerCamelCase_)
self.assertIn('''sentencepiece_and_tokenizers''' , lowerCamelCase_)
# Likewise, we can't assert on the exact content of a key
self.assertIn('''BertModel''' , objects['''torch'''])
self.assertIn('''TFBertModel''' , objects['''tf'''])
self.assertIn('''FlaxBertModel''' , objects['''flax'''])
self.assertIn('''BertModel''' , objects['''torch'''])
self.assertIn('''TFBertTokenizer''' , objects['''tensorflow_text'''])
self.assertIn('''convert_slow_tokenizer''' , objects['''sentencepiece_and_tokenizers'''])
def UpperCAmelCase__ ( self) -> Optional[int]:
UpperCamelCase = create_dummy_object('''CONSTANT''' , '''\'torch\'''')
self.assertEqual(lowerCamelCase_ , '''\nCONSTANT = None\n''')
UpperCamelCase = create_dummy_object('''function''' , '''\'torch\'''')
self.assertEqual(
lowerCamelCase_ , '''\ndef function(*args, **kwargs):\n requires_backends(function, \'torch\')\n''')
UpperCamelCase = '''
class FakeClass(metaclass=DummyObject):
_backends = \'torch\'
def __init__(self, *args, **kwargs):
requires_backends(self, \'torch\')
'''
UpperCamelCase = create_dummy_object('''FakeClass''' , '''\'torch\'''')
self.assertEqual(lowerCamelCase_ , lowerCamelCase_)
def UpperCAmelCase__ ( self) -> int:
UpperCamelCase = '''# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends
CONSTANT = None
def function(*args, **kwargs):
requires_backends(function, ["torch"])
class FakeClass(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
'''
UpperCamelCase = create_dummy_files({'''torch''': ['''CONSTANT''', '''function''', '''FakeClass''']})
self.assertEqual(dummy_files['''torch'''] , lowerCamelCase_) | 34 | 0 |
import os
from typing import BinaryIO, Optional, Union
import numpy as np
import pyarrow.parquet as pq
from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config
from ..features.features import FeatureType, _visit
from ..formatting import query_table
from ..packaged_modules import _PACKAGED_DATASETS_MODULES
from ..packaged_modules.parquet.parquet import Parquet
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
def __lowercase( UpperCAmelCase__ ):
"""simple docstring"""
lowerCamelCase = np.inf
def set_batch_size(UpperCAmelCase__ ) -> None:
nonlocal batch_size
if isinstance(_lowercase , _lowercase ):
lowerCamelCase = min(_lowercase , config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS )
elif isinstance(_lowercase , _lowercase ):
lowerCamelCase = min(_lowercase , config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS )
elif isinstance(_lowercase , _lowercase ) and feature.dtype == "binary":
lowerCamelCase = min(_lowercase , config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS )
_visit(_lowercase , _lowercase )
return None if batch_size is np.inf else batch_size
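# The helper above caps the Parquet row-group (writer batch) size for binary-heavy
# features such as Image and Audio so that individual row groups stay small;
# datasets without such features fall through to the library default batch size.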
class lowerCamelCase__ ( lowerCamelCase_):
"""simple docstring"""
def __init__(self , __a , __a = None , __a = None , __a = None , __a = False , __a = False , __a = None , **__a , ):
'''simple docstring'''
super().__init__(
lowerCamelCase_ , split=lowerCamelCase_ , features=lowerCamelCase_ , cache_dir=lowerCamelCase_ , keep_in_memory=lowerCamelCase_ , streaming=lowerCamelCase_ , num_proc=lowerCamelCase_ , **lowerCamelCase_ , )
lowerCamelCase = path_or_paths if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else {self.split: path_or_paths}
lowerCamelCase = _PACKAGED_DATASETS_MODULES["parquet"][1]
lowerCamelCase = Parquet(
cache_dir=lowerCamelCase_ , data_files=lowerCamelCase_ , features=lowerCamelCase_ , hash=lowerCamelCase_ , **lowerCamelCase_ , )
def _a (self ):
'''simple docstring'''
if self.streaming:
lowerCamelCase = self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
lowerCamelCase = None
lowerCamelCase = None
lowerCamelCase = None
lowerCamelCase = None
self.builder.download_and_prepare(
download_config=lowerCamelCase_ , download_mode=lowerCamelCase_ , verification_mode=lowerCamelCase_ , base_path=lowerCamelCase_ , num_proc=self.num_proc , )
lowerCamelCase = self.builder.as_dataset(
split=self.split , verification_mode=lowerCamelCase_ , in_memory=self.keep_in_memory )
return dataset
class lowerCamelCase__ :
"""simple docstring"""
def __init__(self , __a , __a , __a = None , **__a , ):
'''simple docstring'''
lowerCamelCase = dataset
lowerCamelCase = path_or_buf
lowerCamelCase = batch_size or get_writer_batch_size(dataset.features )
lowerCamelCase = parquet_writer_kwargs
def _a (self ):
'''simple docstring'''
lowerCamelCase = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE
if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ):
with open(self.path_or_buf , "wb+" ) as buffer:
lowerCamelCase = self._write(file_obj=lowerCamelCase_ , batch_size=lowerCamelCase_ , **self.parquet_writer_kwargs )
else:
lowerCamelCase = self._write(file_obj=self.path_or_buf , batch_size=lowerCamelCase_ , **self.parquet_writer_kwargs )
return written
def _a (self , __a , __a , **__a ):
'''simple docstring'''
lowerCamelCase = 0
lowerCamelCase = parquet_writer_kwargs.pop("path_or_buf" , lowerCamelCase_ )
lowerCamelCase = self.dataset.features.arrow_schema
lowerCamelCase = pq.ParquetWriter(lowerCamelCase_ , schema=lowerCamelCase_ , **lowerCamelCase_ )
for offset in logging.tqdm(
range(0 , len(self.dataset ) , lowerCamelCase_ ) , unit="ba" , disable=not logging.is_progress_bar_enabled() , desc="Creating parquet from Arrow format" , ):
lowerCamelCase = query_table(
table=self.dataset._data , key=slice(lowerCamelCase_ , offset + batch_size ) , indices=self.dataset._indices if self.dataset._indices is not None else None , )
writer.write_table(lowerCamelCase_ )
written += batch.nbytes
writer.close()
return written | 623 |
"""simple docstring"""
import argparse
import requests
import torch
from PIL import Image
from transformers import ViTMAEConfig, ViTMAEForPreTraining, ViTMAEImageProcessor
def __snake_case ( _lowercase ):
"""simple docstring"""
if "cls_token" in name:
UpperCamelCase = name.replace('''cls_token''' ,'''vit.embeddings.cls_token''' )
if "mask_token" in name:
UpperCamelCase = name.replace('''mask_token''' ,'''decoder.mask_token''' )
if "decoder_pos_embed" in name:
UpperCamelCase = name.replace('''decoder_pos_embed''' ,'''decoder.decoder_pos_embed''' )
if "pos_embed" in name and "decoder" not in name:
UpperCamelCase = name.replace('''pos_embed''' ,'''vit.embeddings.position_embeddings''' )
if "patch_embed.proj" in name:
UpperCamelCase = name.replace('''patch_embed.proj''' ,'''vit.embeddings.patch_embeddings.projection''' )
if "patch_embed.norm" in name:
UpperCamelCase = name.replace('''patch_embed.norm''' ,'''vit.embeddings.norm''' )
if "decoder_blocks" in name:
UpperCamelCase = name.replace('''decoder_blocks''' ,'''decoder.decoder_layers''' )
if "blocks" in name:
UpperCamelCase = name.replace('''blocks''' ,'''vit.encoder.layer''' )
if "attn.proj" in name:
UpperCamelCase = name.replace('''attn.proj''' ,'''attention.output.dense''' )
if "attn" in name:
UpperCamelCase = name.replace('''attn''' ,'''attention.self''' )
if "norm1" in name:
UpperCamelCase = name.replace('''norm1''' ,'''layernorm_before''' )
if "norm2" in name:
UpperCamelCase = name.replace('''norm2''' ,'''layernorm_after''' )
if "mlp.fc1" in name:
UpperCamelCase = name.replace('''mlp.fc1''' ,'''intermediate.dense''' )
if "mlp.fc2" in name:
UpperCamelCase = name.replace('''mlp.fc2''' ,'''output.dense''' )
if "decoder_embed" in name:
UpperCamelCase = name.replace('''decoder_embed''' ,'''decoder.decoder_embed''' )
if "decoder_norm" in name:
UpperCamelCase = name.replace('''decoder_norm''' ,'''decoder.decoder_norm''' )
if "decoder_pred" in name:
UpperCamelCase = name.replace('''decoder_pred''' ,'''decoder.decoder_pred''' )
if "norm.weight" in name and "decoder" not in name:
UpperCamelCase = name.replace('''norm.weight''' ,'''vit.layernorm.weight''' )
if "norm.bias" in name and "decoder" not in name:
UpperCamelCase = name.replace('''norm.bias''' ,'''vit.layernorm.bias''' )
return name
def __snake_case ( _lowercase ,_lowercase ):
"""simple docstring"""
for key in orig_state_dict.copy().keys():
UpperCamelCase = orig_state_dict.pop(_lowercase )
if "qkv" in key:
UpperCamelCase = key.split('''.''' )
UpperCamelCase = int(key_split[1] )
if "decoder_blocks" in key:
UpperCamelCase = config.decoder_hidden_size
UpperCamelCase = '''decoder.decoder_layers.'''
if "weight" in key:
UpperCamelCase = val[:dim, :]
UpperCamelCase = val[dim : dim * 2, :]
UpperCamelCase = val[-dim:, :]
elif "bias" in key:
UpperCamelCase = val[:dim]
UpperCamelCase = val[dim : dim * 2]
UpperCamelCase = val[-dim:]
else:
UpperCamelCase = config.hidden_size
UpperCamelCase = '''vit.encoder.layer.'''
if "weight" in key:
UpperCamelCase = val[:dim, :]
UpperCamelCase = val[dim : dim * 2, :]
UpperCamelCase = val[-dim:, :]
elif "bias" in key:
UpperCamelCase = val[:dim]
UpperCamelCase = val[dim : dim * 2]
UpperCamelCase = val[-dim:]
else:
UpperCamelCase = val
return orig_state_dict
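# Note on the qkv handling above: each fused timm-style qkv tensor of shape
# (3 * dim, dim) is split into equal query/key/value slices before being mapped
# onto the HuggingFace parameter layout.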
def convert_vit_mae_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    """Download a ViT-MAE checkpoint, convert it to the HF format and verify it on a sample image."""
    config = ViTMAEConfig()
    if "large" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
    elif "huge" in checkpoint_url:
        config.patch_size = 14
        config.hidden_size = 1280
        config.intermediate_size = 5120
        config.num_hidden_layers = 32
        config.num_attention_heads = 16

    model = ViTMAEForPreTraining(config)

    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]
    image_processor = ViTMAEImageProcessor(size=config.image_size)
    new_state_dict = convert_state_dict(state_dict, config)

    model.load_state_dict(new_state_dict)
    model.eval()

    url = "https://user-images.githubusercontent.com/11435359/147738734-196fd92f-9260-48d5-ba7e-bf103d29364d.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = image_processor(images=image, return_tensors="pt")

    # forward pass
    torch.manual_seed(2)
    outputs = model(**inputs)
    logits = outputs.logits

    if "large" in checkpoint_url:
        expected_slice = torch.tensor(
            [[-0.7309, -0.7128, -1.0169], [-1.0161, -0.9058, -1.1878], [-1.0478, -0.9411, -1.1911]])
    elif "huge" in checkpoint_url:
        expected_slice = torch.tensor(
            [[-1.1599, -0.9199, -1.2221], [-1.1952, -0.9269, -1.2307], [-1.2143, -0.9337, -1.2262]])
    else:
        expected_slice = torch.tensor(
            [[-0.9192, -0.8481, -1.1259], [-1.1349, -1.0034, -1.2599], [-1.1757, -1.0429, -1.2726]])

    # verify logits
    assert torch.allclose(logits[0, :3, :3], expected_slice, atol=1e-4)

    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://dl.fbaipublicfiles.com/mae/visualize/mae_visualize_vit_base.pth',
type=str,
help='URL of the checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
SCREAMING_SNAKE_CASE_ = parser.parse_args()
convert_vit_mae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path) | 34 | 0 |
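
The crux of the conversion above is the qkv split in convert_state_dict: the original MAE checkpoint stores each attention layer's query/key/value projections as one fused (3*dim, dim) matrix, while the HF model expects three separate tensors. A self-contained sketch of that slicing, with a made-up hidden size (real checkpoints use 768/1024/1280):

import torch

dim = 8  # hypothetical hidden size for illustration
fused_qkv_weight = torch.randn(3 * dim, dim)  # rows stacked as [query | key | value]

query_w = fused_qkv_weight[:dim, :]         # first `dim` rows
key_w = fused_qkv_weight[dim : dim * 2, :]  # middle `dim` rows
value_w = fused_qkv_weight[-dim:, :]        # last `dim` rows

# Re-stacking the slices reproduces the fused tensor exactly
assert torch.equal(torch.cat([query_w, key_w, value_w], dim=0), fused_qkv_weight)
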
'''simple docstring'''
from typing import List
import jiwer
import jiwer.transforms as tr
from packaging import version
import datasets
from datasets.config import PY_VERSION
if PY_VERSION < version.parse('''3.8'''):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
SENTENCE_DELIMITER = ""

if version.parse(importlib_metadata.version("jiwer")) < version.parse("2.3.0"):

    class SentencesToListOfCharacters(tr.AbstractTransform):
        def __init__(self, sentence_delimiter=" "):
            self.sentence_delimiter = sentence_delimiter

        def process_string(self, s):
            return list(s)

        def process_list(self, inp):
            chars = []
            for sent_idx, sentence in enumerate(inp):
                chars.extend(self.process_string(sentence))
                if self.sentence_delimiter is not None and self.sentence_delimiter != "" and sent_idx < len(inp) - 1:
                    chars.append(self.sentence_delimiter)
            return chars

    cer_transform = tr.Compose(
        [tr.RemoveMultipleSpaces(), tr.Strip(), SentencesToListOfCharacters(SENTENCE_DELIMITER)]
    )
else:
    cer_transform = tr.Compose(
        [
            tr.RemoveMultipleSpaces(),
            tr.Strip(),
            tr.ReduceToSingleSentence(SENTENCE_DELIMITER),
            tr.ReduceToListOfListOfChars(),
        ]
    )

_CITATION = """\
@inproceedings{inproceedings,
    author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
    year = {2004},
    month = {01},
    pages = {},
    title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
"""

_DESCRIPTION = """\
Character error rate (CER) is a common metric of the performance of an automatic speech recognition system.

CER is similar to Word Error Rate (WER), but operates on character instead of word. Please refer to docs of WER for further information.

Character error rate can be computed as:

CER = (S + D + I) / N = (S + D + I) / (S + D + C)

where

S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct characters,
N is the number of characters in the reference (N=S+D+C).

CER's output is not always a number between 0 and 1, in particular when there is a high number of insertions. This value is often associated to the percentage of characters that were incorrectly predicted. The lower the value, the better the
performance of the ASR system with a CER of 0 being a perfect score.
"""

_KWARGS_DESCRIPTION = """
Computes CER score of transcribed segments against references.
Args:
    references: list of references for each speech input.
    predictions: list of transcriptions to score.
    concatenate_texts: Whether or not to concatenate sentences before evaluation, set to True for a more accurate result.
Returns:
    (float): the character error rate

Examples:

    >>> predictions = ["this is the prediction", "there is an other sample"]
    >>> references = ["this is the reference", "there is another one"]
    >>> cer = datasets.load_metric("cer")
    >>> cer_score = cer.compute(predictions=predictions, references=references)
    >>> print(cer_score)
    0.34146341463414637
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class CER(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/jitsi/jiwer/"],
            reference_urls=[
                "https://en.wikipedia.org/wiki/Word_error_rate",
                "https://sites.google.com/site/textdigitisation/qualitymeasures/computingerrorrates",
            ],
        )

    def _compute(self, predictions, references, concatenate_texts=False):
        if concatenate_texts:
            return jiwer.compute_measures(
                references,
                predictions,
                truth_transform=cer_transform,
                hypothesis_transform=cer_transform,
            )["wer"]

        incorrect = 0
        total = 0
        for prediction, reference in zip(predictions, references):
            measures = jiwer.compute_measures(
                reference,
                prediction,
                truth_transform=cer_transform,
                hypothesis_transform=cer_transform,
            )
            incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
            total += measures["substitutions"] + measures["deletions"] + measures["hits"]

        return incorrect / total | 667 |
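
The description's formula CER = (S + D + I) / N is character-level edit distance divided by reference length. A dependency-free sketch of the same quantity (jiwer additionally applies the whitespace transforms composed above, so results can differ on unnormalized text):

def char_error_rate(reference: str, prediction: str) -> float:
    m, n = len(reference), len(prediction)
    # dp[i][j] = edits needed to turn reference[:i] into prediction[:j]
    dp = [[0] * (n + 1) for _ in range(m + 1)]
    for i in range(m + 1):
        dp[i][0] = i  # deletions
    for j in range(n + 1):
        dp[0][j] = j  # insertions
    for i in range(1, m + 1):
        for j in range(1, n + 1):
            cost = 0 if reference[i - 1] == prediction[j - 1] else 1  # substitution
            dp[i][j] = min(dp[i - 1][j] + 1, dp[i][j - 1] + 1, dp[i - 1][j - 1] + cost)
    return dp[m][n] / m

print(char_error_rate("abcd", "abce"))  # 0.25: one substitution over four reference characters
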
"""simple docstring"""
import unittest
import torch
from torch import nn
from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory
def raise_fake_out_of_memory():
    """Simulate a CUDA OOM error without needing a GPU."""
    raise RuntimeError("CUDA out of memory.")
class ModelForTest(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))
class MemoryTest(unittest.TestCase):
    def test_memory_implicit(self):
        batch_sizes = []

        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size):
            nonlocal batch_sizes
            batch_sizes.append(batch_size)
            if batch_size != 8:
                raise_fake_out_of_memory()

        mock_training_loop_function()
        self.assertListEqual(batch_sizes, [128, 64, 32, 16, 8])

    def test_memory_explicit(self):
        batch_sizes = []

        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size, arg1):
            nonlocal batch_sizes
            batch_sizes.append(batch_size)
            if batch_size != 8:
                raise_fake_out_of_memory()
            return batch_size, arg1

        bs, arg1 = mock_training_loop_function("hello")
        self.assertListEqual(batch_sizes, [128, 64, 32, 16, 8])
        self.assertListEqual([bs, arg1], [8, "hello"])

    def test_start_zero(self):
        @find_executable_batch_size(starting_batch_size=0)
        def mock_training_loop_function(batch_size):
            pass

        with self.assertRaises(RuntimeError) as cm:
            mock_training_loop_function()
        self.assertIn("No executable batch size found, reached zero.", cm.exception.args[0])

    def test_approach_zero(self):
        @find_executable_batch_size(starting_batch_size=16)
        def mock_training_loop_function(batch_size):
            if batch_size > 0:
                raise_fake_out_of_memory()

        with self.assertRaises(RuntimeError) as cm:
            mock_training_loop_function()
        self.assertIn("No executable batch size found, reached zero.", cm.exception.args[0])

    def test_verbose_guard(self):
        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size, arg1, arg2):
            if batch_size != 8:
                raise_fake_out_of_memory()

        with self.assertRaises(TypeError) as cm:
            mock_training_loop_function(128, "hello", "world")
        self.assertIn("Batch size was passed into `f`", cm.exception.args[0])
        self.assertIn("`f(arg1='hello', arg2='world')", cm.exception.args[0])

    def test_any_other_error(self):
        @find_executable_batch_size(starting_batch_size=16)
        def mock_training_loop_function(batch_size):
            raise ValueError("Oops, we had an error!")

        with self.assertRaises(ValueError) as cm:
            mock_training_loop_function()
        self.assertIn("Oops, we had an error!", cm.exception.args[0])

    @require_cuda
    def test_release_memory(self):
        starting_memory = torch.cuda.memory_allocated()
        model = ModelForTest()
        model.cuda()
        self.assertGreater(torch.cuda.memory_allocated(), starting_memory)
        model = release_memory(model)
        self.assertEqual(torch.cuda.memory_allocated(), starting_memory) | 34 | 0 |
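
For context, find_executable_batch_size wraps the training function in a retry loop that halves the batch size whenever it sees an out-of-memory error, producing exactly the geometric sequence 128, 64, ... the first test asserts. A stripped-down sketch of the idea (not accelerate's actual implementation, which also clears CUDA caches and inspects function signatures):

import functools

def find_batch_size(starting_batch_size=128):
    def decorator(fn):
        @functools.wraps(fn)
        def wrapper(*args, **kwargs):
            batch_size = starting_batch_size
            while True:
                if batch_size == 0:
                    raise RuntimeError("No executable batch size found, reached zero.")
                try:
                    return fn(batch_size, *args, **kwargs)
                except RuntimeError as e:
                    if "out of memory" in str(e):
                        batch_size //= 2  # halve and retry
                    else:
                        raise
        return wrapper
    return decorator

@find_batch_size(starting_batch_size=32)
def train(batch_size):
    if batch_size > 8:
        raise RuntimeError("CUDA out of memory.")
    return batch_size

assert train() == 8
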
'''simple docstring'''
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotImageClassificationPipeline(Pipeline):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
        )

    def __call__(self, images, **kwargs):
        return super().__call__(images, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]
        return preprocess_params, {}, {}

    def preprocess(self, image, candidate_labels=None, hypothesis_template="This is a photo of {}."):
        image = load_image(image)
        inputs = self.image_processor(images=[image], return_tensors=self.framework)
        inputs["candidate_labels"] = candidate_labels
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs["text_inputs"] = [text_inputs]
        return inputs

    def _forward(self, model_inputs):
        candidate_labels = model_inputs.pop("candidate_labels")
        text_inputs = model_inputs.pop("text_inputs")
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]

        outputs = self.model(**text_inputs, **model_inputs)

        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_image,
        }
        return model_outputs

    def postprocess(self, model_outputs):
        candidate_labels = model_outputs.pop("candidate_labels")
        logits = model_outputs["logits"][0]
        if self.framework == "pt":
            probs = logits.softmax(dim=-1).squeeze(-1)
            scores = probs.tolist()
            if not isinstance(scores, list):
                scores = [scores]
        elif self.framework == "tf":
            probs = stable_softmax(logits, axis=-1)
            scores = probs.numpy().tolist()
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
| 474 |
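
In practice this pipeline is reached through the pipeline factory. A usage sketch (requires network access; the CLIP checkpoint name is illustrative):

from transformers import pipeline

classifier = pipeline("zero-shot-image-classification", model="openai/clip-vit-base-patch32")
result = classifier(
    "https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png",
    candidate_labels=["animals", "humans", "landscape"],
)
print(result)  # e.g. [{'score': 0.96, 'label': 'animals'}, ...]
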
"""simple docstring"""
from typing import Dict
from transformers import EvalPrediction, HfArgumentParser, TrainingArguments, is_torch_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
get_torch_dist_unique_port,
require_torch_multi_gpu,
require_torch_neuroncore,
)
from transformers.training_args import ParallelMode
from transformers.utils import logging
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
from transformers import Trainer
class DummyDataset(Dataset):
    def __init__(self, length: int = 101):
        self.length = length

    def __len__(self):
        return self.length

    def __getitem__(self, i):
        return i


class DummyDataCollator:
    def __call__(self, features):
        return {"input_ids": torch.tensor(features), "labels": torch.tensor(features)}
class DummyModel(nn.Module):
    def __init__(self):
        super().__init__()
        # Add some (unused) params otherwise DDP will complain.
        self.fc = nn.Linear(120, 80)

    def forward(self, input_ids, labels=None):
        if labels is not None:
            return torch.tensor(0.0, device=input_ids.device), input_ids
        else:
            return input_ids
class TestTrainerDistributedNeuronCore(TestCasePlus):
    @require_torch_neuroncore
    def test_trainer(self):
        distributed_args = f"""--nproc_per_node=2
            --master_port={get_torch_dist_unique_port()}
            {self.test_file_dir}/test_trainer_distributed.py
        """.split()
        output_dir = self.get_auto_remove_tmp_dir()
        args = f"--output_dir {output_dir}".split()
        cmd = ["torchrun"] + distributed_args + args
        execute_subprocess_async(cmd, env=self.get_env())
        # successful return here == success - any errors would have caused an error in the sub-call


class TestTrainerDistributed(TestCasePlus):
    @require_torch_multi_gpu
    def test_trainer(self):
        distributed_args = f"""--nproc_per_node={torch.cuda.device_count()}
            --master_port={get_torch_dist_unique_port()}
            {self.test_file_dir}/test_trainer_distributed.py
        """.split()
        output_dir = self.get_auto_remove_tmp_dir()
        args = f"--output_dir {output_dir}".split()
        cmd = ["torchrun"] + distributed_args + args
        execute_subprocess_async(cmd, env=self.get_env())
        # successful return here == success - any errors would have caused an error in the sub-call
if __name__ == "__main__":
# The script below is meant to be run under torch.distributed, on a machine with multiple GPUs:
#
# PYTHONPATH="src" python -m torch.distributed.run --nproc_per_node 2 --output_dir output_dir ./tests/test_trainer_distributed.py
    parser = HfArgumentParser((TrainingArguments,))
    training_args = parser.parse_args_into_dataclasses()[0]

    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        f"distributed training: {training_args.parallel_mode != ParallelMode.NOT_DISTRIBUTED}"
    )

    # Essentially, what we want to verify in the distributed case is that we get all samples back,
    # in the right order. (this is crucial for prediction for instance)
    for dataset_length in [101, 40, 7]:
        dataset = DummyDataset(dataset_length)

        def compute_metrics(p: EvalPrediction) -> Dict:
            sequential = list(range(len(dataset)))
            success = p.predictions.tolist() == sequential and p.label_ids.tolist() == sequential
            if not success and training_args.local_rank == 0:
                logger.warning(
                    "Predictions and/or labels do not match expected results:\n  - predictions: "
                    f"{p.predictions.tolist()}\n  - labels: {p.label_ids.tolist()}\n  - expected: {sequential}"
                )
            return {"success": success}

        trainer = Trainer(
            model=DummyModel(),
            args=training_args,
            data_collator=DummyDataCollator(),
            eval_dataset=dataset,
            compute_metrics=compute_metrics,
        )
        metrics = trainer.evaluate()
        logger.info(metrics)
        if metrics["eval_success"] is not True:
            logger.error(metrics)
            exit(1)

        p = trainer.predict(dataset)
        logger.info(p.metrics)
        if p.metrics["test_success"] is not True:
            logger.error(p.metrics)
            exit(1)

        trainer.args.eval_accumulation_steps = 2

        metrics = trainer.evaluate()
        logger.info(metrics)
        if metrics["eval_success"] is not True:
            logger.error(metrics)
            exit(1)

        p = trainer.predict(dataset)
        logger.info(p.metrics)
        if p.metrics["test_success"] is not True:
            logger.error(p.metrics)
            exit(1)

        trainer.args.eval_accumulation_steps = None | 34 | 0 |
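
The invariant the script checks — every rank's shard, gathered in rank order and truncated, reproduces the dataset order exactly — can be illustrated without torch.distributed. A toy sketch (not the Trainer's actual sampler code):

import math

def shard(n_samples, world_size):
    """Contiguous equal-size shards with tail padding, one per rank."""
    per_rank = math.ceil(n_samples / world_size)
    padded = list(range(n_samples)) + list(range(per_rank * world_size - n_samples))
    return [padded[r * per_rank : (r + 1) * per_rank] for r in range(world_size)]

def gather(shards, n_samples):
    """Concatenate rank outputs in rank order and drop the padding."""
    return [x for s in shards for x in s][:n_samples]

for n in (101, 40, 7):
    assert gather(shard(n, world_size=2), n) == list(range(n))
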
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_torch_available():
import torch
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "AI-Sweden/gpt-sw3-126m": "https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model",
        "AI-Sweden/gpt-sw3-350m": "https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model",
        "AI-Sweden/gpt-sw3-1.6b": "https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model",
        "AI-Sweden/gpt-sw3-6.7b": "https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model",
        "AI-Sweden/gpt-sw3-20b": "https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "AI-Sweden/gpt-sw3-126m": 2048,
    "AI-Sweden/gpt-sw3-350m": 2048,
    "AI-Sweden/gpt-sw3-1.6b": 2048,
    "AI-Sweden/gpt-sw3-6.7b": 2048,
    "AI-Sweden/gpt-sw3-20b": 2048,
}
class GPTSw3Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=False,
        keep_accents=False,
        pad_token=None,
        unk_token=None,
        eos_token=None,
        bos_token=None,
        sp_model_kwargs=None,
        **kwargs,
    ):
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        name_or_path = kwargs.get("name_or_path")
        if name_or_path is None:
            logger.warning(
                "name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,"
                " you are testing the model, this can safely be ignored"
            )
            name_or_path = "None"

        # Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
        eos_token = "<|endoftext|>" if eos_token is None else eos_token
        unk_token = "<unk>" if unk_token is None else unk_token
        if "gpt-sw3-7b" in name_or_path:
            pad_token = unk_token if pad_token is None else pad_token
            bos_token = eos_token if bos_token is None else bos_token
        else:
            pad_token = "<pad>" if pad_token is None else pad_token
            bos_token = "<s>" if bos_token is None else bos_token

        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

        # Used for whitespace normalization in input texts
        # fmt: off
        self.whitespaces = {" ", " ", " ", " ", " ", " ", " ", " ", " ", " ", "", ""}
        # fmt: on

        # Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
        self.non_printing_characters_re = re.compile(
            f"[{''.join(map(chr, list(range(0, 9)) + list(range(11, 32)) + list(range(127, 160)) + [160, 173, 8203]))}]"
        )

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    @property
    # Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
    def vocab_size(self) -> int:
        return len(self.sp_model)

    def preprocess_text(self, text: str) -> str:
        # Remove non-printing characters
        text = self.non_printing_characters_re.sub("", text)

        # Normalize whitespaces
        text = "".join([char if char not in self.whitespaces else " " for char in text])

        # NFC Unicode normalization
        text = unicodedata.normalize("NFC", text)
        return text

    def _tokenize(self, text: str, **kwargs) -> List[str]:
        text = self.preprocess_text(text)
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token: str) -> int:
        """Converts a token (str) to an id (int) using the vocab."""
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index: int) -> str:
        """Converts an index (int) to a token (str) using the vocab."""
        return self.sp_model.IdToPiece(index)

    @staticmethod
    def clean_up_tokenization(out_string: str) -> str:
        """Returns the input string; overridden to skip the default cleanup."""
        return out_string

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                # TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def encode_fast(self, text: Union[str, List[str]], return_tensors: Union[str, bool] = False):
        """Encodes a text or batch of texts to token ids using preprocessing and the raw SP tokenizer."""
        if isinstance(text, str):
            text = self.preprocess_text(text)
            token_ids = self.sp_model.encode(text)
        else:
            text = [self.preprocess_text(t) for t in text]
            token_ids = self.sp_model.encode(text)

        if return_tensors is True or return_tensors == "pt":
            token_ids = torch.tensor(token_ids)

        return token_ids

    def decode_fast(self, token_ids: Union[int, List[int]]) -> str:
        """Decodes token ids back to text using the raw SP tokenizer."""
        return self.sp_model.decode(token_ids)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        all_responses = [f"User: {text}" if is_user else f"Bot: {text}" for is_user, text in conversation.iter_texts()]
        prompt = (
            f"{self.eos_token}{self.bos_token}" + f"{self.bos_token}".join(all_responses) + f"{self.bos_token}Bot:"
        )
        return self.encode(text=prompt)
| 662 |
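
The preprocessing step above boils down to: strip non-printing characters, collapse exotic whitespace to plain spaces, NFC-normalize. A self-contained sketch with the same stdlib tools (it builds its own character sets rather than reusing the tokenizer's):

import re
import unicodedata

# Control characters and other non-printing code points to drop
NON_PRINTING = re.compile(
    "[" + "".join(map(chr, list(range(0, 9)) + list(range(11, 32)) + list(range(127, 160)) + [160, 173, 8203])) + "]"
)

def normalize(text: str) -> str:
    text = NON_PRINTING.sub("", text)                            # remove control chars
    text = "".join(c if not c.isspace() else " " for c in text)  # unify whitespace
    return unicodedata.normalize("NFC", text)                    # canonical composition

assert normalize("cafe\u0301\u2002au lait\u200b") == "café au lait"
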
"""simple docstring"""
import argparse
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration
INIT_COMMON = [
    # tf -> hf
    ("/", "."),
    ("layer_", "layers."),
    ("kernel", "weight"),
    ("beta", "bias"),
    ("gamma", "weight"),
    ("pegasus", "model"),
]
END_COMMON = [
    (".output.dense", ".fc2"),
    ("intermediate.LayerNorm", "final_layer_norm"),
    ("intermediate.dense", "fc1"),
]

DECODER_PATTERNS = (
    INIT_COMMON
    + [
        ("attention.self.LayerNorm", "self_attn_layer_norm"),
        ("attention.output.dense", "self_attn.out_proj"),
        ("attention.self", "self_attn"),
        ("attention.encdec.LayerNorm", "encoder_attn_layer_norm"),
        ("attention.encdec_output.dense", "encoder_attn.out_proj"),
        ("attention.encdec", "encoder_attn"),
        ("key", "k_proj"),
        ("value", "v_proj"),
        ("query", "q_proj"),
        ("decoder.LayerNorm", "decoder.layernorm_embedding"),
    ]
    + END_COMMON
)

REMAINING_PATTERNS = (
    INIT_COMMON
    + [
        ("embeddings.word_embeddings", "shared.weight"),
        ("embeddings.position_embeddings", "embed_positions.weight"),
        ("attention.self.LayerNorm", "self_attn_layer_norm"),
        ("attention.output.dense", "self_attn.output"),
        ("attention.self", "self_attn.self"),
        ("encoder.LayerNorm", "encoder.layernorm_embedding"),
    ]
    + END_COMMON
)

KEYS_TO_IGNORE = [
    "encdec/key/bias",
    "encdec/query/bias",
    "encdec/value/bias",
    "self/key/bias",
    "self/query/bias",
    "self/value/bias",
    "encdec_output/dense/bias",
    "attention/output/dense/bias",
]
def rename_state_dict_key(k, patterns):
    """Apply the (tf_name, hf_name) substitution pairs, in order, to a checkpoint key."""
    for tf_name, hf_name in patterns:
        k = k.replace(tf_name, hf_name)
    return k
def convert_bigbird_pegasus(tf_weights: dict, config_update: dict) -> BigBirdPegasusForConditionalGeneration:
    cfg = BigBirdPegasusConfig(**config_update)
    torch_model = BigBirdPegasusForConditionalGeneration(cfg)
    state_dict = torch_model.state_dict()
    mapping = {}

    # separating decoder weights
    decoder_weights = {k: tf_weights[k] for k in tf_weights if k.startswith("pegasus/decoder")}
    remaining_weights = {k: tf_weights[k] for k in tf_weights if not k.startswith("pegasus/decoder")}

    for k, v in tqdm(decoder_weights.items(), "tf -> hf conversion"):
        conditions = [k.endswith(ending) for ending in KEYS_TO_IGNORE]
        if any(conditions):
            continue
        patterns = DECODER_PATTERNS
        new_k = rename_state_dict_key(k, patterns)
        if new_k not in state_dict:
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")
        if any(True if i in k else False for i in ["dense", "query", "key", "value"]):
            v = v.T
        mapping[new_k] = torch.from_numpy(v)
        assert v.shape == state_dict[new_k].shape, f"{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"

    for k, v in tqdm(remaining_weights.items(), "tf -> hf conversion"):
        conditions = [k.endswith(ending) for ending in KEYS_TO_IGNORE]
        if any(conditions):
            continue
        patterns = REMAINING_PATTERNS
        new_k = rename_state_dict_key(k, patterns)
        if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings":
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")
        if any(True if i in k else False for i in ["dense", "query", "key", "value"]):
            v = v.T
        mapping[new_k] = torch.from_numpy(v)
        if k != "pegasus/embeddings/position_embeddings":
            assert v.shape == state_dict[new_k].shape, f"{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"

    mapping["model.encoder.embed_positions.weight"] = mapping["model.embed_positions.weight"]
    mapping["model.decoder.embed_positions.weight"] = mapping.pop("model.embed_positions.weight")
    missing, extra = torch_model.load_state_dict(mapping, strict=False)
    unexpected_missing = [
        k
        for k in missing
        if k
        not in [
            "final_logits_bias",
            "model.encoder.embed_tokens.weight",
            "model.decoder.embed_tokens.weight",
            "lm_head.weight",
        ]
    ]
    assert unexpected_missing == [], f"no matches found for the following torch keys {unexpected_missing}"
    assert extra == [], f"no matches found for the following tf keys {extra}"
    return torch_model
def get_tf_weights_as_numpy(path) -> dict:
    init_vars = tf.train.list_variables(path)
    tf_weights = {}
    ignore_name = ["global_step"]
    for name, shape in tqdm(init_vars, desc="converting tf checkpoint to dict"):
        skip_key = any(pat in name for pat in ignore_name)
        if skip_key:
            continue
        array = tf.train.load_variable(path, name)
        tf_weights[name] = array
    return tf_weights
def convert_bigbird_pegasus_ckpt_to_pytorch(ckpt_path: str, save_dir: str, config_update: dict):
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    torch_model = convert_bigbird_pegasus(tf_weights, config_update)
    torch_model.save_pretrained(save_dir)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--tf_ckpt_path", type=str, help="passed to tf.train.list_variables")
    parser.add_argument("--save_dir", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    config_update = {}
    convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update) | 34 | 0 |
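
The rename step is nothing more than ordered string substitutions applied in sequence. A self-contained mini-version with a tiny pattern list and an illustrative key:

patterns = [("/", "."), ("layer_", "layers."), ("kernel", "weight"), ("pegasus", "model")]

def rename(k: str) -> str:
    for old, new in patterns:
        k = k.replace(old, new)
    return k

assert rename("pegasus/decoder/layer_0/output/dense/kernel") == "model.decoder.layers.0.output.dense.weight"
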
import inspect
import unittest
class DependencyTester(unittest.TestCase):
    def test_diffusers_import(self):
        try:
            import diffusers  # noqa: F401
        except ImportError:
            assert False

    def test_backend_registration(self):
        import diffusers
        from diffusers.dependency_versions_table import deps

        all_classes = inspect.getmembers(diffusers, inspect.isclass)

        for cls_name, cls_module in all_classes:
            if "dummy_" in cls_module.__module__:
                for backend in cls_module._backends:
                    if backend == "k_diffusion":
                        backend = "k-diffusion"
                    elif backend == "invisible_watermark":
                        backend = "invisible-watermark"
                    assert backend in deps, f"{backend} is not in the deps table!"
| 23 |
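
The discovery mechanism in the test is inspect.getmembers with a class predicate; it works on any module:

import inspect
import unittest

classes = inspect.getmembers(unittest, inspect.isclass)  # [(name, class), ...], sorted by name
print(len(classes), classes[0][0])
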
"""simple docstring"""
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
def calculate_prob(text: str) -> None:
    single_char_strings, two_char_strings = analyze_text(text)
    my_alphas = list(" " + ascii_lowercase)
    # what is our total sum of probabilities.
    all_sum = sum(single_char_strings.values())

    # one length string
    my_fir_sum = 0
    # for each alpha we go in our dict and if it is in it we calculate entropy
    for ch in my_alphas:
        if ch in single_char_strings:
            my_str = single_char_strings[ch]
            prob = my_str / all_sum
            my_fir_sum += prob * math.log2(prob)  # entropy formula.

    # print entropy
    print(f"{round(-1 * my_fir_sum):.1f}")

    # two len string
    all_sum = sum(two_char_strings.values())
    my_sec_sum = 0
    # for each alpha (two in size) calculate entropy.
    for cha in my_alphas:
        for chb in my_alphas:
            sequence = cha + chb
            if sequence in two_char_strings:
                my_str = two_char_strings[sequence]
                prob = int(my_str) / all_sum
                my_sec_sum += prob * math.log2(prob)

    # print second entropy
    print(f"{round(-1 * my_sec_sum):.1f}")

    # print the difference between them
    print(f"{round((-1 * my_sec_sum) - (-1 * my_fir_sum)):.1f}")


def analyze_text(text: str) -> tuple[dict, dict]:
    single_char_strings = Counter()  # type: ignore
    two_char_strings = Counter()  # type: ignore
    single_char_strings[text[-1]] += 1

    # first case when we have space at start.
    two_char_strings[" " + text[0]] += 1
    for i in range(0, len(text) - 1):
        single_char_strings[text[i]] += 1
        two_char_strings[text[i : i + 2]] += 1
    return single_char_strings, two_char_strings
def main():
    import doctest

    doctest.testmod()
# text = (
# "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
# "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
# "jointure saw horrible. He private he on be imagine suppose. Fertile "
# "beloved evident through no service elderly is. Blind there if every no so "
# "at. Own neglected you preferred way sincerity delivered his attempted. To "
# "of message cottage windows do besides against uncivil. Delightful "
# "unreserved impossible few estimating men favourable see entreaties. She "
# "propriety immediate was improving. He or entrance humoured likewise "
# "moderate. Much nor game son say feel. Fat make met can must form into "
# "gate. Me we offending prevailed discovery. "
# )
# calculate_prob(text)
if __name__ == "__main__":
main() | 34 | 0 |
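
As a sanity check of the first-order formula H = -Σ p(x)·log2 p(x): a source with two equiprobable symbols should come out at exactly one bit.

from collections import Counter
import math

counts = Counter("abab")  # {'a': 2, 'b': 2}
total = sum(counts.values())
entropy = -sum((c / total) * math.log2(c / total) for c in counts.values())
print(entropy)  # 1.0
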
import re
import string
import numpy as np
import datasets
_DESCRIPTION = """
Returns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.
"""

_KWARGS_DESCRIPTION = """
Args:
    predictions: List of predicted texts.
    references: List of reference texts.
    regexes_to_ignore: List, defaults to None. Regex expressions of characters to
        ignore when calculating the exact matches. Note: these regexes are removed
        from the input data before the changes based on the options below (e.g. ignore_case,
        ignore_punctuation, ignore_numbers) are applied.
    ignore_case: Boolean, defaults to False. If true, turns everything
        to lowercase so that capitalization differences are ignored.
    ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before
        comparing predictions and references.
    ignore_numbers: Boolean, defaults to False. If true, removes all digits before
        comparing predictions and references.
Returns:
    exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.
Examples:
    >>> exact_match = datasets.load_metric("exact_match")
    >>> refs = ["the cat", "theater", "YELLING", "agent007"]
    >>> preds = ["cat?", "theater", "yelling", "agent"]
    >>> results = exact_match.compute(references=refs, predictions=preds)
    >>> print(round(results["exact_match"], 1))
    25.0

    >>> exact_match = datasets.load_metric("exact_match")
    >>> refs = ["the cat", "theater", "YELLING", "agent007"]
    >>> preds = ["cat?", "theater", "yelling", "agent"]
    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell"], ignore_case=True, ignore_punctuation=True)
    >>> print(round(results["exact_match"], 1))
    50.0

    >>> exact_match = datasets.load_metric("exact_match")
    >>> refs = ["the cat", "theater", "YELLING", "agent007"]
    >>> preds = ["cat?", "theater", "yelling", "agent"]
    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True)
    >>> print(round(results["exact_match"], 1))
    75.0

    >>> exact_match = datasets.load_metric("exact_match")
    >>> refs = ["the cat", "theater", "YELLING", "agent007"]
    >>> preds = ["cat?", "theater", "yelling", "agent"]
    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)
    >>> print(round(results["exact_match"], 1))
    100.0

    >>> exact_match = datasets.load_metric("exact_match")
    >>> refs = ["The cat sat on the mat.", "Theaters are great.", "It's like comparing oranges and apples."]
    >>> preds = ["The cat sat on the mat?", "Theaters are great.", "It's like comparing apples and oranges."]
    >>> results = exact_match.compute(references=refs, predictions=preds)
    >>> print(round(results["exact_match"], 1))
    33.3
"""

_CITATION = """
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class ExactMatch(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            reference_urls=[],
        )

    def _compute(
        self,
        predictions,
        references,
        regexes_to_ignore=None,
        ignore_case=False,
        ignore_punctuation=False,
        ignore_numbers=False,
    ):
        if regexes_to_ignore is not None:
            for s in regexes_to_ignore:
                predictions = np.array([re.sub(s, "", x) for x in predictions])
                references = np.array([re.sub(s, "", x) for x in references])
        else:
            predictions = np.asarray(predictions)
            references = np.asarray(references)

        if ignore_case:
            predictions = np.char.lower(predictions)
            references = np.char.lower(references)

        if ignore_punctuation:
            repl_table = string.punctuation.maketrans("", "", string.punctuation)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        if ignore_numbers:
            repl_table = string.digits.maketrans("", "", string.digits)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        score_list = predictions == references

        return {"exact_match": np.mean(score_list) * 100} | 557 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import DistilBertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.distilbert.modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
)
class FlaxDistilBertModelTester(unittest.TestCase):
    def __init__(
        self, parent, batch_size=13, seq_length=7, is_training=True, use_attention_mask=True,
        use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5,
        num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16,
        type_sequence_label_size=2, initializer_range=0.02, num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        config = DistilBertConfig(
            vocab_size=self.vocab_size, dim=self.hidden_size, n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads, hidden_dim=self.intermediate_size, hidden_act=self.hidden_act,
            dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range,
            tie_weights_=True,
        )

        return config, input_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_flax
class FlaxDistilBertModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaxDistilBertModel,
            FlaxDistilBertForMaskedLM,
            FlaxDistilBertForMultipleChoice,
            FlaxDistilBertForQuestionAnswering,
            FlaxDistilBertForSequenceClassification,
            FlaxDistilBertForTokenClassification,
            FlaxDistilBertForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxDistilBertModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("distilbert-base-uncased")
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxDistilBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = FlaxDistilBertModel.from_pretrained("distilbert-base-uncased")
        input_ids = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = (1, 11, 768)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = np.array([[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]])
        self.assertTrue(jnp.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4)) | 34 | 0 |
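
A usage sketch for the model under test (needs the flax extras and network access; checkpoint name as in the test above):

import numpy as np
from transformers import DistilBertTokenizer, FlaxDistilBertModel

tokenizer = DistilBertTokenizer.from_pretrained("distilbert-base-uncased")
model = FlaxDistilBertModel.from_pretrained("distilbert-base-uncased")

inputs = tokenizer("Hello, world!", return_tensors="np")
outputs = model(**inputs)
print(outputs.last_hidden_state.shape)  # (1, sequence_length, 768)
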
import sys
from pathlib import Path
UpperCAmelCase_ : Optional[int] = Path(__file__).resolve().parents[3] / "src"
sys.path.insert(1, str(git_repo_path))
import dataclasses # noqa
import io # noqa
import itertools # noqa
import json # noqa
import os # noqa
import unittest # noqa
from copy import deepcopy # noqa
from parameterized import parameterized # noqa
from transformers import TrainingArguments, is_torch_available # noqa
from transformers.deepspeed import is_deepspeed_available # noqa
from transformers.file_utils import WEIGHTS_NAME # noqa
from transformers.testing_utils import ( # noqa
CaptureLogger,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
mockenv_context,
require_deepspeed,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
from transformers.trainer_utils import set_seed # noqa
set_seed(42)
UpperCAmelCase_ : Union[str, Any] = {"base": "patrickvonplaten/wav2vec2_tiny_random", "robust": "patrickvonplaten/wav2vec2_tiny_random_robust"}
UpperCAmelCase_ : int = "zero2"
UpperCAmelCase_ : List[Any] = "zero3"
UpperCAmelCase_ : Union[str, Any] = [ZEROa, ZEROa]
def UpperCamelCase ( _A : str , _A : Tuple , _A : str )-> str:
"""simple docstring"""
A__ = parameterized.to_safe_name("_".join(str(_lowercase ) for x in param.args ) )
return f"""{func.__name__}_{param_based_name}"""
# Cartesian-product of zero stages with models to test
UpperCAmelCase_ : Any = list(itertools.product(stages, models.keys()))
@slow
@require_deepspeed
@require_torch_gpu
class TestDeepSpeedWav2Vec2(TestCasePlus):
    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp32_non_distributed(self, stage, model):
        self.run_and_check(stage=stage, model=model, distributed=False, fp16=False)

    @require_torch_multi_gpu
    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp32_distributed(self, stage, model):
        self.run_and_check(stage=stage, model=model, distributed=True, fp16=False)

    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp16_non_distributed(self, stage, model):
        self.run_and_check(stage=stage, model=model, distributed=False, fp16=True)

    @require_torch_multi_gpu
    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp16_distributed(self, stage, model):
        self.run_and_check(stage=stage, model=model, distributed=True, fp16=True)

    def do_checks(self, output_dir):
        # XXX: run_asr is premature and doesn't save any results
        # so all we check for now is that the process didn't fail
        pass

    def run_and_check(self, stage, model, eval_steps=10, distributed=True, quality_checks=True, fp16=True):
        model_name = models[model]

        output_dir = self.run_trainer(
            stage=stage,
            model_name=model_name,
            eval_steps=eval_steps,
            num_train_epochs=1,
            distributed=distributed,
            fp16=fp16,
        )

        self.do_checks(output_dir)

        return output_dir

    def run_trainer(self, stage, model_name, eval_steps=10, num_train_epochs=1, distributed=True, fp16=True):
        output_dir = self.get_auto_remove_tmp_dir("./xxx", after=False)
        args = f"""
            --model_name_or_path {model_name}
            --dataset_name hf-internal-testing/librispeech_asr_dummy
            --dataset_config_name clean
            --train_split_name validation
            --validation_split_name validation
            --output_dir {output_dir}
            --num_train_epochs {str(num_train_epochs)}
            --per_device_train_batch_size 2
            --per_device_eval_batch_size 2
            --evaluation_strategy steps
            --learning_rate 5e-4
            --warmup_steps 8
            --orthography timit
            --preprocessing_num_workers 1
            --group_by_length
            --freeze_feature_extractor
            --report_to none
            --save_steps 0
            --eval_steps {eval_steps}
            --report_to none
        """.split()

        if fp16:
            args.extend(["--fp16"])

        # currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true,
        # hence the separate config files
        ds_args = f"--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json".split()
        script = [f"{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py"]
        launcher = self.get_launcher(distributed)

        cmd = launcher + script + args + ds_args
        # keep for quick debug
        # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
        execute_subprocess_async(cmd, env=self.get_env())

        return output_dir

    def get_launcher(self, distributed=False):
        # 1. explicitly set --num_nodes=1 just in case these tests end up run on a multi-node setup
        #    - it won't be able to handle that
        # 2. for now testing with just 2 gpus max (since some quality tests may give different
        #    results with mode gpus because we use very little data)
        num_gpus = min(2, get_gpu_count()) if distributed else 1
        return f"deepspeed --num_nodes 1 --num_gpus {num_gpus}".split()
| 491 |
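
The launcher, script, trainer args and deepspeed args concatenate into one argv list before being handed to execute_subprocess_async. Illustrative values only:

launcher = "deepspeed --num_nodes 1 --num_gpus 2".split()
script = ["examples/research_projects/wav2vec2/run_asr.py"]  # path illustrative
args = ["--output_dir", "/tmp/out", "--fp16"]
ds_args = ["--deepspeed", "ds_config_wav2vec2_zero2.json"]
cmd = launcher + script + args + ds_args
print(" ".join(cmd))
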
"""simple docstring"""
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotImageClassificationPipeline(Pipeline):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
        )

    def __call__(self, images, **kwargs):
        return super().__call__(images, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]
        return preprocess_params, {}, {}

    def preprocess(self, image, candidate_labels=None, hypothesis_template="This is a photo of {}."):
        image = load_image(image)
        inputs = self.image_processor(images=[image], return_tensors=self.framework)
        inputs["candidate_labels"] = candidate_labels
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs["text_inputs"] = [text_inputs]
        return inputs

    def _forward(self, model_inputs):
        candidate_labels = model_inputs.pop("candidate_labels")
        text_inputs = model_inputs.pop("text_inputs")
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]

        outputs = self.model(**text_inputs, **model_inputs)

        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_image,
        }
        return model_outputs

    def postprocess(self, model_outputs):
        candidate_labels = model_outputs.pop("candidate_labels")
        logits = model_outputs["logits"][0]
        if self.framework == "pt":
            probs = logits.softmax(dim=-1).squeeze(-1)
            scores = probs.tolist()
            if not isinstance(scores, list):
                scores = [scores]
        elif self.framework == "tf":
            probs = stable_softmax(logits, axis=-1)
            scores = probs.numpy().tolist()
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result | 34 | 0 |