def valid_coloring(neighbours: list[int], colored_vertices: list[int], color: int) -> bool:
    """Check that no already-colored neighbour of the current vertex uses `color`."""
    return not any(
        neighbour == 1 and colored_vertices[i] == color
        for i, neighbour in enumerate(neighbours)
    )


def util_color(graph: list[list[int]], max_colors: int, colored_vertices: list[int], index: int) -> bool:
    """Recursively try to color vertex `index` and every vertex after it."""
    if index == len(graph):
        return True
    # Recursive step
    for i in range(max_colors):
        if valid_coloring(graph[index], colored_vertices, i):
            # Color current vertex
            colored_vertices[index] = i
            # Validate coloring
            if util_color(graph, max_colors, colored_vertices, index + 1):
                return True
            # Backtrack
            colored_vertices[index] = -1
    return False


def color(graph: list[list[int]], max_colors: int) -> list[int]:
    """Return a valid vertex coloring using at most `max_colors` colors, or [] if none exists."""
    colored_vertices = [-1] * len(graph)
    if util_color(graph, max_colors, colored_vertices, 0):
        return colored_vertices
    return []
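

if __name__ == "__main__":
    # A minimal demo (the adjacency matrix and color count below are illustrative):
    demo_graph = [
        [0, 1, 0, 0, 0],
        [1, 0, 1, 0, 1],
        [0, 1, 0, 1, 0],
        [0, 0, 1, 0, 1],
        [0, 1, 0, 1, 0],
    ]
    print(color(demo_graph, 3))  # one valid assignment, e.g. [0, 1, 0, 1, 0]; [] if impossible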
import io
import os
import unicodedata
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "sentencepiece_model_ckpt": "sentencepiece.bpe.model"}

RESOURCE_FILES_NAMES = {
    "sentencepiece_model_file": "sentencepiece.bpe.model",
    "vocab_file": "vocab.txt",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt",
        "ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt",
    },
    "sentencepiece_model_file": {
        "ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model",
        "ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "ernie-m-base": 514,
    "ernie-m-large": 514,
}

PRETRAINED_INIT_CONFIGURATION = {
    "ernie-m-base": {"do_lower_case": False},
    "ernie-m-large": {"do_lower_case": False},
}
class ErnieMTokenizer(PreTrainedTokenizer):
    """Constructs an ErnieM tokenizer, backed by SentencePiece."""

    model_input_names: List[str] = ["input_ids"]
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    resource_files_names = RESOURCE_FILES_NAMES

    def __init__(
        self,
        sentencepiece_model_ckpt,
        vocab_file=None,
        do_lower_case=False,
        encoding="utf8",
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # The mask token behaves like a normal word, i.e. it includes the space before it
        # and is included in the raw text, so it matches in a non-normalized sentence.
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token,
            pad_token=pad_token, cls_token=cls_token, mask_token=mask_token,
            vocab_file=vocab_file, encoding=encoding, sp_model_kwargs=self.sp_model_kwargs, **kwargs,
        )
        self.do_lower_case = do_lower_case
        self.sentencepiece_model_ckpt = sentencepiece_model_ckpt
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(sentencepiece_model_ckpt)
        # Mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer behaviour
        if vocab_file is not None:
            self.vocab = self.load_vocab(filepath=vocab_file)
        else:
            self.vocab = {self.sp_model.id_to_piece(id): id for id in range(self.sp_model.get_piece_size())}
        self.reverse_vocab = {v: k for k, v in self.vocab.items()}
    def get_offset_mapping(self, text):
        if text is None:
            return None
        split_tokens = self.tokenize(text)
        normalized_text, char_mapping = "", []
        for i, ch in enumerate(text):
            if ch in self.SP_CHAR_MAPPING:
                ch = self.SP_CHAR_MAPPING.get(ch)
            else:
                ch = unicodedata.normalize("NFKC", ch)
            if self.is_whitespace(ch):
                continue
            normalized_text += ch
            char_mapping.extend([i] * len(ch))
        text, token_mapping, offset = normalized_text, [], 0
        if self.do_lower_case:
            text = text.lower()
        for token in split_tokens:
            if token[:1] == "▁":
                token = token[1:]
            start = text[offset:].index(token) + offset
            end = start + len(token)
            token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1))
            offset = end
        return token_mapping
    @property
    def vocab_size(self):
        return len(self.vocab)

    def get_vocab(self):
        return dict(self.vocab, **self.added_tokens_encoder)

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.sentencepiece_model_ckpt)

    def clean_text(self, text):
        """Map special characters through SP_CHAR_MAPPING, leaving everything else untouched."""
        return "".join(self.SP_CHAR_MAPPING.get(c, c) for c in text)
    def _tokenize(self, text, enable_sampling=False, nbest_size=64, alpha=0.1):
        """Tokenize a string with SentencePiece, then re-split pieces on CJK chars, punctuation and digit boundaries."""
        if self.sp_model_kwargs.get("enable_sampling") is True:
            enable_sampling = True
        if self.sp_model_kwargs.get("alpha") is not None:
            alpha = self.sp_model_kwargs.get("alpha")
        if self.sp_model_kwargs.get("nbest_size") is not None:
            nbest_size = self.sp_model_kwargs.get("nbest_size")
        if not enable_sampling:
            pieces = self.sp_model.EncodeAsPieces(text)
        else:
            pieces = self.sp_model.SampleEncodeAsPieces(text, nbest_size, alpha)
        new_pieces = []
        for pi, piece in enumerate(pieces):
            if piece == SPIECE_UNDERLINE:
                if not pieces[pi + 1].startswith(SPIECE_UNDERLINE) and pi != 0:
                    new_pieces.append(SPIECE_UNDERLINE)
                    continue
                else:
                    continue
            lst_i = 0
            for i, chunk in enumerate(piece):
                if chunk == SPIECE_UNDERLINE:
                    continue
                if self.is_ch_char(chunk) or self.is_punct(chunk):
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i])
                    new_pieces.append(chunk)
                    lst_i = i + 1
                elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit():
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i])
                    lst_i = i
                elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit():
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i])
                    lst_i = i
            if len(piece) > lst_i:
                new_pieces.append(piece[lst_i:])
        return new_pieces
    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def convert_ids_to_string(self, ids):
        tokens = self.convert_ids_to_tokens(ids)
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def _convert_token_to_id(self, token):
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.reverse_vocab.get(index, self.unk_token)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        _cls = [self.cls_token_id]
        _sep = [self.sep_token_id]
        return _cls + token_ids_0 + _sep + _sep + token_ids_1 + _sep

    def build_offset_mapping_with_special_tokens(self, offset_mapping_0, offset_mapping_1=None):
        if offset_mapping_1 is None:
            return [(0, 0)] + offset_mapping_0 + [(0, 0)]
        return [(0, 0)] + offset_mapping_0 + [(0, 0), (0, 0)] + offset_mapping_1 + [(0, 0)]

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        # Called when `add_special_tokens` is True, so align with `build_inputs_with_special_tokens`.
        if token_ids_1 is None:
            # [CLS] X [SEP]
            return (len(token_ids_0) + 2) * [0]
        # [CLS] A [SEP] [SEP] B [SEP]
        return [0] * (len(token_ids_0) + 1) + [1] * (len(token_ids_1) + 3)
    def is_ch_char(self, char):
        """Whether `char` is a CJK unified ideograph."""
        if "\u4e00" <= char <= "\u9fff":
            return True
        return False

    def is_alpha(self, char):
        """Whether `char` is an ASCII letter."""
        if ("a" <= char <= "z") or ("A" <= char <= "Z"):
            return True
        return False

    def is_punct(self, char):
        """Whether `char` is one of the punctuation characters handled specially."""
        if char in ",;:.?!~,;:。?!《》【】":
            return True
        return False

    def is_whitespace(self, char):
        """Whether `char` is a whitespace character."""
        if char == " " or char == "\t" or char == "\n" or char == "\r":
            return True
        if len(char) == 1:
            cat = unicodedata.category(char)
            if cat == "Zs":
                return True
        return False
    def load_vocab(self, filepath):
        token_to_idx = {}
        with io.open(filepath, "r", encoding="utf-8") as f:
            for index, line in enumerate(f):
                token = line.rstrip("\n")
                token_to_idx[token] = int(index)
        return token_to_idx

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        index = 0
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
            )
        else:
            vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!"
                    )
                    index = token_index
                writer.write(token + "\n")
                index += 1
        tokenizer_model_file = os.path.join(save_directory, "sentencepiece.bpe.model")
        with open(tokenizer_model_file, "wb") as fi:
            content_spiece_model = self.sp_model.serialized_model_proto()
            fi.write(content_spiece_model)
        return (vocab_file,)
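

# A short sketch of the pair layout produced by `build_inputs_with_special_tokens`
# and `create_token_type_ids_from_sequences` (token ids below are hypothetical):
#   with cls_token_id=0, sep_token_id=2, A=[10, 11], B=[20]:
#   input_ids      -> [CLS] A [SEP] [SEP] B [SEP] -> [0, 10, 11, 2, 2, 20, 2]
#   token_type_ids -> [0] * (len(A) + 1) + [1] * (len(B) + 3) -> [0, 0, 0, 1, 1, 1, 1]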
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_albert import AlbertTokenizer
else:
    AlbertTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/spiece.model",
"albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/spiece.model",
"albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model",
"albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model",
"albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/spiece.model",
"albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/spiece.model",
"albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model",
"albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model",
},
"tokenizer_file": {
"albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json",
"albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json",
"albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json",
"albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json",
"albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json",
"albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json",
"albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json",
"albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "albert-base-v1": 512,
    "albert-large-v1": 512,
    "albert-xlarge-v1": 512,
    "albert-xxlarge-v1": 512,
    "albert-base-v2": 512,
    "albert-large-v2": 512,
    "albert-xlarge-v2": 512,
    "albert-xxlarge-v2": 512,
}
SPIECE_UNDERLINE = "▁"
class AlbertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = AlbertTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, remove_space=True,
                 keep_accents=False, bos_token="[CLS]", eos_token="[SEP]", unk_token="<unk>", sep_token="[SEP]",
                 pad_token="<pad>", cls_token="[CLS]", mask_token="[MASK]", **kwargs):
        # The mask token behaves like a normal word: it includes the preceding space.
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, remove_space=remove_space,
            keep_accents=keep_accents, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token,
            sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, **kwargs,
        )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"xlm-mlm-en-2048": "https://huggingface.co/xlm-mlm-en-2048/resolve/main/config.json",
"xlm-mlm-ende-1024": "https://huggingface.co/xlm-mlm-ende-1024/resolve/main/config.json",
"xlm-mlm-enfr-1024": "https://huggingface.co/xlm-mlm-enfr-1024/resolve/main/config.json",
"xlm-mlm-enro-1024": "https://huggingface.co/xlm-mlm-enro-1024/resolve/main/config.json",
"xlm-mlm-tlm-xnli15-1024": "https://huggingface.co/xlm-mlm-tlm-xnli15-1024/resolve/main/config.json",
"xlm-mlm-xnli15-1024": "https://huggingface.co/xlm-mlm-xnli15-1024/resolve/main/config.json",
"xlm-clm-enfr-1024": "https://huggingface.co/xlm-clm-enfr-1024/resolve/main/config.json",
"xlm-clm-ende-1024": "https://huggingface.co/xlm-clm-ende-1024/resolve/main/config.json",
"xlm-mlm-17-1280": "https://huggingface.co/xlm-mlm-17-1280/resolve/main/config.json",
"xlm-mlm-100-1280": "https://huggingface.co/xlm-mlm-100-1280/resolve/main/config.json",
}
class XLMConfig(PretrainedConfig):
    model_type = "xlm"
    attribute_map = {
        "hidden_size": "emb_dim",
        "num_attention_heads": "n_heads",
        "num_hidden_layers": "n_layers",
        "n_words": "vocab_size",  # For backward compatibility
    }

    def __init__(
        self,
        vocab_size=30145,
        emb_dim=2048,
        n_layers=12,
        n_heads=16,
        dropout=0.1,
        attention_dropout=0.1,
        gelu_activation=True,
        sinusoidal_embeddings=False,
        causal=False,
        asm=False,
        n_langs=1,
        use_lang_emb=True,
        max_position_embeddings=512,
        embed_init_std=2048**-0.5,
        layer_norm_eps=1e-12,
        init_std=0.02,
        bos_index=0,
        eos_index=1,
        pad_index=2,
        unk_index=3,
        mask_index=5,
        is_encoder=True,
        summary_type="first",
        summary_use_proj=True,
        summary_activation=None,
        summary_proj_to_labels=True,
        summary_first_dropout=0.1,
        start_n_top=5,
        end_n_top=5,
        mask_token_id=0,
        lang_id=0,
        pad_token_id=2,
        bos_token_id=0,
        **kwargs,
    ):
        """Constructs XLMConfig."""
        self.vocab_size = vocab_size
        self.emb_dim = emb_dim
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.use_lang_emb = use_lang_emb
        self.layer_norm_eps = layer_norm_eps
        self.bos_index = bos_index
        self.eos_index = eos_index
        self.pad_index = pad_index
        self.unk_index = unk_index
        self.mask_index = mask_index
        self.is_encoder = is_encoder
        self.max_position_embeddings = max_position_embeddings
        self.embed_init_std = embed_init_std
        self.init_std = init_std
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_proj_to_labels = summary_proj_to_labels
        self.summary_first_dropout = summary_first_dropout
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top
        self.mask_token_id = mask_token_id
        self.lang_id = lang_id

        if "n_words" in kwargs:
            self.n_words = kwargs["n_words"]

        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, **kwargs)
class XLMOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
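

if __name__ == "__main__":
    # A minimal usage sketch: `attribute_map` lets the generic names resolve to XLM's own.
    demo_config = XLMConfig(emb_dim=1024, n_layers=6)
    print(demo_config.hidden_size)        # 1024 (aliased to emb_dim)
    print(demo_config.num_hidden_layers)  # 6 (aliased to n_layers)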
import json
import os
import unittest
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class BioGptTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BioGptTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l", "o", "w", "e", "r", "s", "t", "i", "d", "n",
            "w</w>", "r</w>", "t</w>", "lo", "low", "er</w>",
            "low</w>", "lowest</w>", "newer</w>", "wider</w>", "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        """Check BPE tokenization and token-to-id conversion on the toy vocab."""
        tokenizer = BioGptTokenizer(self.vocab_file, self.merges_file)
        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    @slow
    def test_sequence_builders(self):
        tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        self.assertTrue(encoded_sentence == [2] + text)
        self.assertTrue(encoded_pair == [2] + text + [2] + text_2)
import math
from typing import Optional
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"facebook/encodec_24khz": "https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json",
"facebook/encodec_48khz": "https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json",
}
class EncodecConfig(PretrainedConfig):
    model_type = "encodec"

    def __init__(
        self,
        target_bandwidths=[1.5, 3.0, 6.0, 12.0, 24.0],
        sampling_rate=24_000,
        audio_channels=1,
        normalize=False,
        chunk_length_s=None,
        overlap=None,
        hidden_size=128,
        num_filters=32,
        num_residual_layers=1,
        upsampling_ratios=[8, 5, 4, 2],
        norm_type="weight_norm",
        kernel_size=7,
        last_kernel_size=7,
        residual_kernel_size=3,
        dilation_growth_rate=2,
        use_causal_conv=True,
        pad_mode="reflect",
        compress=2,
        num_lstm_layers=2,
        trim_right_ratio=1.0,
        codebook_size=1024,
        codebook_dim=None,
        use_conv_shortcut=True,
        **kwargs,
    ):
        self.target_bandwidths = target_bandwidths
        self.sampling_rate = sampling_rate
        self.audio_channels = audio_channels
        self.normalize = normalize
        self.chunk_length_s = chunk_length_s
        self.overlap = overlap
        self.hidden_size = hidden_size
        self.num_filters = num_filters
        self.num_residual_layers = num_residual_layers
        self.upsampling_ratios = upsampling_ratios
        self.norm_type = norm_type
        self.kernel_size = kernel_size
        self.last_kernel_size = last_kernel_size
        self.residual_kernel_size = residual_kernel_size
        self.dilation_growth_rate = dilation_growth_rate
        self.use_causal_conv = use_causal_conv
        self.pad_mode = pad_mode
        self.compress = compress
        self.num_lstm_layers = num_lstm_layers
        self.trim_right_ratio = trim_right_ratio
        self.codebook_size = codebook_size
        self.codebook_dim = codebook_dim if codebook_dim is not None else hidden_size
        self.use_conv_shortcut = use_conv_shortcut

        if self.norm_type not in ["weight_norm", "time_group_norm"]:
            raise ValueError(
                f'self.norm_type must be one of `"weight_norm"`, `"time_group_norm"`), got {self.norm_type}'
            )

        super().__init__(**kwargs)

    @property
    def chunk_length(self) -> Optional[int]:
        if self.chunk_length_s is None:
            return None
        else:
            return int(self.chunk_length_s * self.sampling_rate)

    @property
    def chunk_stride(self) -> Optional[int]:
        if self.chunk_length_s is None or self.overlap is None:
            return None
        else:
            return max(1, int((1.0 - self.overlap) * self.chunk_length))

    @property
    def frame_rate(self) -> int:
        hop_length = np.prod(self.upsampling_ratios)
        return math.ceil(self.sampling_rate / hop_length)

    @property
    def num_quantizers(self) -> int:
        return int(1000 * self.target_bandwidths[-1] // (self.frame_rate * 10))
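

if __name__ == "__main__":
    # A quick sketch of the derived properties with the defaults above:
    demo_config = EncodecConfig()
    print(demo_config.frame_rate)      # ceil(24_000 / (8 * 5 * 4 * 2)) = 75
    print(demo_config.num_quantizers)  # int(1_000 * 24.0 // (75 * 10)) = 32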
import string
def atbash_slow(sequence: str) -> str:
    """Encode `sequence` with the Atbash cipher by mirroring character codes."""
    output = ""
    for i in sequence:
        extract = ord(i)
        if 65 <= extract <= 90:  # uppercase: 'A' (65) maps to 'Z' (90), since 65 + 90 = 155
            output += chr(155 - extract)
        elif 97 <= extract <= 122:  # lowercase: 'a' (97) maps to 'z' (122), since 97 + 122 = 219
            output += chr(219 - extract)
        else:
            output += i
    return output


def atbash(sequence: str) -> str:
    """Encode `sequence` with the Atbash cipher via a reversed-alphabet lookup."""
    letters = string.ascii_letters
    letters_reversed = string.ascii_lowercase[::-1] + string.ascii_uppercase[::-1]
    return "".join(
        letters_reversed[letters.index(c)] if c in letters else c for c in sequence
    )


def benchmark() -> None:
    """Print the runtime of both implementations over `string.printable`."""
    from timeit import timeit

    print("Running performance benchmarks...")
    setup = "from string import printable ; from __main__ import atbash, atbash_slow"
    print(f"> atbash_slow(): {timeit('atbash_slow(printable)', setup=setup)} seconds")
    print(f"> atbash(): {timeit('atbash(printable)', setup=setup)} seconds")


if __name__ == "__main__":
    for example in ("ABCDEFGH", "123GGjj", "testStringtest", "with space"):
        print(f"{example} encrypted in atbash: {atbash(example)}")
    benchmark()
from __future__ import annotations
def merge(input_list: list, low: int, mid: int, high: int) -> list:
    """Sort input_list[low:high + 1] in place by merging its two sorted halves."""
    result = []
    left, right = input_list[low:mid], input_list[mid : high + 1]
    while left and right:
        result.append((left if left[0] <= right[0] else right).pop(0))
    input_list[low : high + 1] = result + left + right
    return input_list


def iter_merge_sort(input_list: list) -> list:
    """Iterative (bottom-up) merge sort."""
    if len(input_list) <= 1:
        return input_list
    input_list = list(input_list)

    # iteration for two-way merging
    p = 2
    while p <= len(input_list):
        # getting low, high and middle value for merge-sort of single list
        for i in range(0, len(input_list), p):
            low = i
            high = i + p - 1
            mid = (low + high + 1) // 2
            input_list = merge(input_list, low, mid, high)
        # final merge of last two parts
        if p * 2 >= len(input_list):
            mid = i
            input_list = merge(input_list, 0, mid, len(input_list) - 1)
            break
        p *= 2
    return input_list


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    if user_input == "":
        unsorted = []
    else:
        unsorted = [int(item.strip()) for item in user_input.split(",")]
    print(iter_merge_sort(unsorted))
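# A quick sanity check without the interactive prompt (expected outputs shown):
#
#   iter_merge_sort([5, 9, 8, 7, 1, 2, 7])  # -> [1, 2, 5, 7, 7, 8, 9]
#   iter_merge_sort([])                     # -> []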
from ..utils import DummyObject, requires_backends
class TFGPT2Tokenizer(metaclass=DummyObject):
    _backends = ["keras_nlp"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["keras_nlp"])
import random
import sys

import numpy as np
from matplotlib import pyplot as plt
from matplotlib.colors import ListedColormap

usage_doc = "Usage of script: script_name <size_of_canvas:int>"

choice = [0] * 100 + [1] * 10
random.shuffle(choice)


def create_canvas(size: int) -> list[list[bool]]:
    """Create a size x size canvas of dead (False) cells."""
    canvas = [[False for i in range(size)] for j in range(size)]
    return canvas


def seed(canvas: list[list[bool]]) -> None:
    """Randomly set each cell of the canvas to alive or dead."""
    for i, row in enumerate(canvas):
        for j, _ in enumerate(row):
            canvas[i][j] = bool(random.getrandbits(1))


def run(canvas: list[list[bool]]) -> list[list[bool]]:
    """Advance the Game of Life by one generation and return the new canvas."""
    current_canvas = np.array(canvas)
    next_gen_canvas = np.array(create_canvas(current_canvas.shape[0]))
    for r, row in enumerate(current_canvas):
        for c, pt in enumerate(row):
            next_gen_canvas[r][c] = __judge_point(
                pt, current_canvas[r - 1 : r + 2, c - 1 : c + 2]
            )

    current_canvas = next_gen_canvas
    del next_gen_canvas  # cleaning memory as we move on.
    return_canvas: list[list[bool]] = current_canvas.tolist()
    return return_canvas


def __judge_point(pt: bool, neighbours: list[list[bool]]) -> bool:
    dead = 0
    alive = 0
    # finding dead or alive neighbours count.
    for i in neighbours:
        for status in i:
            if status:
                alive += 1
            else:
                dead += 1

    # handling duplicate entry for focus pt.
    if pt:
        alive -= 1
    else:
        dead -= 1

    # running the rules of game here.
    state = pt
    if pt:
        if alive < 2:
            state = False
        elif alive == 2 or alive == 3:
            state = True
        elif alive > 3:
            state = False
    else:
        if alive == 3:
            state = True
    return state


if __name__ == "__main__":
    if len(sys.argv) != 2:
        raise Exception(usage_doc)

    canvas_size = int(sys.argv[1])
    # main working structure of this module.
    c = create_canvas(canvas_size)
    seed(c)
    fig, ax = plt.subplots()
    fig.show()
    cmap = ListedColormap(["w", "k"])
    try:
        while True:
            c = run(c)
            ax.matshow(c, cmap=cmap)
            fig.canvas.draw()
            ax.cla()
    except KeyboardInterrupt:
        # do nothing.
        pass
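# A quick check of the update rule, independent of the plotting loop. Note that the
# 3x3 neighbourhood passed to `__judge_point` includes the centre cell itself:
#
#   __judge_point(True, [[True, True, False], [False, True, False], [False, False, False]])   # True: 2 live neighbours, survives
#   __judge_point(False, [[True, True, True], [False, False, False], [False, False, False]])  # True: 3 live neighbours, birth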
import json
import os
import torch
from diffusers import UNet1DModel
os.makedirs("hub/hopper-medium-v2/unet/hor32", exist_ok=True)
os.makedirs("hub/hopper-medium-v2/unet/hor128", exist_ok=True)
os.makedirs("hub/hopper-medium-v2/value_function", exist_ok=True)
def unet(hor):
    """Convert a temporal UNet checkpoint (planning horizon `hor`) to diffusers format."""
    if hor == 128:
        down_block_types = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D")
        block_out_channels = (32, 128, 256)
        up_block_types = ("UpResnetBlock1D", "UpResnetBlock1D")
    elif hor == 32:
        down_block_types = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D")
        block_out_channels = (32, 64, 128, 256)
        up_block_types = ("UpResnetBlock1D", "UpResnetBlock1D", "UpResnetBlock1D")
    model = torch.load(f"/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch")
    state_dict = model.state_dict()
    config = {
        "down_block_types": down_block_types,
        "block_out_channels": block_out_channels,
        "up_block_types": up_block_types,
        "layers_per_block": 1,
        "use_timestep_embedding": True,
        "out_block_type": "OutConv1DBlock",
        "norm_num_groups": 8,
        "downsample_each_block": False,
        "in_channels": 14,
        "out_channels": 14,
        "extra_in_channels": 0,
        "time_embedding_type": "positional",
        "flip_sin_to_cos": False,
        "freq_shift": 1,
        "sample_size": 65536,
        "mid_block_type": "MidResTemporalBlock1D",
        "act_fn": "mish",
    }
    hf_value_function = UNet1DModel(**config)
    print(f"length of state dict: {len(state_dict.keys())}")
    print(f"length of value function dict: {len(hf_value_function.state_dict().keys())}")
    # Remap checkpoint keys onto the diffusers model's keys (relies on matching order).
    mapping = dict(zip(model.state_dict().keys(), hf_value_function.state_dict().keys()))
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k)
    hf_value_function.load_state_dict(state_dict)

    torch.save(hf_value_function.state_dict(), f"hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin")
    with open(f"hub/hopper-medium-v2/unet/hor{hor}/config.json", "w") as f:
        json.dump(config, f)


def value_function():
    """Convert the value-function checkpoint to diffusers format."""
    config = {
        "in_channels": 14,
        "down_block_types": ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D"),
        "up_block_types": (),
        "out_block_type": "ValueFunction",
        "mid_block_type": "ValueFunctionMidBlock1D",
        "block_out_channels": (32, 64, 128, 256),
        "layers_per_block": 1,
        "downsample_each_block": True,
        "sample_size": 65536,
        "out_channels": 14,
        "extra_in_channels": 0,
        "time_embedding_type": "positional",
        "use_timestep_embedding": True,
        "flip_sin_to_cos": False,
        "freq_shift": 1,
        "norm_num_groups": 8,
        "act_fn": "mish",
    }
    model = torch.load("/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch")
    state_dict = model  # this checkpoint is already a state dict
    hf_value_function = UNet1DModel(**config)
    print(f"length of state dict: {len(state_dict.keys())}")
    print(f"length of value function dict: {len(hf_value_function.state_dict().keys())}")
    mapping = dict(zip(state_dict.keys(), hf_value_function.state_dict().keys()))
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k)
    hf_value_function.load_state_dict(state_dict)

    torch.save(hf_value_function.state_dict(), "hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin")
    with open("hub/hopper-medium-v2/value_function/config.json", "w") as f:
        json.dump(config, f)
if __name__ == "__main__":
unet(32)
# unet(128)
value_function()
import inspect
import warnings
from typing import Any, Dict, Optional, Union
from packaging import version
def deprecate(*args, take_from: Optional[Union[Dict, Any]] = None, standard_warn: bool = True, stacklevel: int = 2):
    """Warn about (and optionally pop) deprecated arguments or attributes."""
    from .. import __version__

    deprecated_kwargs = take_from
    values = ()
    if not isinstance(args[0], tuple):
        args = (args,)

    for attribute, version_name, message in args:
        if version.parse(version.parse(__version__).base_version) >= version.parse(version_name):
            raise ValueError(
                f"The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers'"
                f" version {__version__} is >= {version_name}"
            )

        warning = None
        if isinstance(deprecated_kwargs, dict) and attribute in deprecated_kwargs:
            values += (deprecated_kwargs.pop(attribute),)
            warning = f"The `{attribute}` argument is deprecated and will be removed in version {version_name}."
        elif hasattr(deprecated_kwargs, attribute):
            values += (getattr(deprecated_kwargs, attribute),)
            warning = f"The `{attribute}` attribute is deprecated and will be removed in version {version_name}."
        elif deprecated_kwargs is None:
            warning = f"`{attribute}` is deprecated and will be removed in version {version_name}."

        if warning is not None:
            warning = warning + " " if standard_warn else ""
            warnings.warn(warning + message, FutureWarning, stacklevel=stacklevel)

    if isinstance(deprecated_kwargs, dict) and len(deprecated_kwargs) > 0:
        call_frame = inspect.getouterframes(inspect.currentframe())[1]
        filename = call_frame.filename
        line_number = call_frame.lineno
        function = call_frame.function
        key, value = next(iter(deprecated_kwargs.items()))
        raise TypeError(f"{function} in {filename} line {line_number - 1} got an unexpected keyword argument `{key}`")

    if len(values) == 0:
        return
    elif len(values) == 1:
        return values[0]
    return values
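

# A usage sketch (the argument names and version string below are illustrative):
# a caller that renamed `width` to `image_width` can pop the legacy kwarg and warn.
def resize(image, image_width=None, **kwargs):
    width = deprecate("width", "1.0.0", "Use `image_width` instead.", take_from=kwargs)
    if width is not None:
        image_width = width
    return image, image_width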
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class HfFileSystem(AbstractFileSystem):
    """Legacy read-only filesystem over the files of a Hub dataset repository."""

    root_marker = ""
    protocol = "hf-legacy"  # "hf://" is reserved for hffs

    def __init__(self, repo_info: Optional[DatasetInfo] = None, token: Optional[str] = None, **kwargs):
        super().__init__(self, **kwargs)
        self.repo_info = repo_info
        self.token = token
        self.dir_cache = None

    def _get_dirs(self):
        if self.dir_cache is None:
            self.dir_cache = {}
            for hf_file in self.repo_info.siblings:
                # TODO(QL): add sizes
                self.dir_cache[hf_file.rfilename] = {
                    "name": hf_file.rfilename,
                    "size": None,
                    "type": "file",
                }
                self.dir_cache.update(
                    {
                        str(d): {"name": str(d), "size": None, "type": "directory"}
                        for d in list(PurePosixPath(hf_file.rfilename).parents)[:-1]
                    }
                )

    def _open(self, path: str, mode: str = "rb", **kwargs):
        if not isinstance(self.repo_info, DatasetInfo):
            raise NotImplementedError(f"Open is only implemented for dataset repositories, but got {self.repo_info}")
        url = hf_hub_url(self.repo_info.id, path, revision=self.repo_info.sha)
        return fsspec.open(
            url,
            mode=mode,
            headers=get_authentication_headers_for_url(url, use_auth_token=self.token),
            client_kwargs={"trust_env": True},
        ).open()

    def info(self, path, **kwargs):
        self._get_dirs()
        path = self._strip_protocol(path)
        if path in self.dir_cache:
            return self.dir_cache[path]
        else:
            raise FileNotFoundError(path)

    def ls(self, path, detail=False, **kwargs):
        self._get_dirs()
        path = PurePosixPath(path.strip("/"))
        paths = {}
        for p, f in self.dir_cache.items():
            p = PurePosixPath(p.strip("/"))
            root = p.parent
            if root == path:
                paths[str(p)] = f
        out = list(paths.values())
        if detail:
            return out
        else:
            return sorted(f["name"] for f in out)
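

# A usage sketch (the `DatasetInfo` object is assumed to come from `huggingface_hub`):
#
#   fs = HfFileSystem(repo_info=dataset_info, token=None)
#   fs.ls("")                        # top-level files and directories
#   with fs.open("train.csv") as f:  # streams the file from the Hub
#       header = f.readline()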
import argparse
import os
from . import (
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
AlbertConfig,
BartConfig,
BertConfig,
CamembertConfig,
CTRLConfig,
DistilBertConfig,
DPRConfig,
ElectraConfig,
FlaubertConfig,
    GPT2Config,
LayoutLMConfig,
LxmertConfig,
OpenAIGPTConfig,
RobertaConfig,
    T5Config,
TFAlbertForPreTraining,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFCamembertForMaskedLM,
TFCTRLLMHeadModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
TFElectraForPreTraining,
TFFlaubertWithLMHeadModel,
    TFGPT2LMHeadModel,
TFLayoutLMForMaskedLM,
TFLxmertForPreTraining,
TFLxmertVisualFeatureEncoder,
TFOpenAIGPTLMHeadModel,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForSequenceClassification,
    TFT5ForConditionalGeneration,
TFTransfoXLLMHeadModel,
    TFWav2Vec2Model,
TFXLMRobertaForMaskedLM,
TFXLMWithLMHeadModel,
TFXLNetLMHeadModel,
TransfoXLConfig,
    Wav2Vec2Config,
    Wav2Vec2Model,
XLMConfig,
XLMRobertaConfig,
XLNetConfig,
is_torch_available,
    load_pytorch_checkpoint_in_tf2_model,
)
from .utils import CONFIG_NAME, WEIGHTS_NAME, cached_file, logging
if is_torch_available():
import numpy as np
import torch
from . import (
AlbertForPreTraining,
BartForConditionalGeneration,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
CamembertForMaskedLM,
CTRLLMHeadModel,
DistilBertForMaskedLM,
DistilBertForQuestionAnswering,
DPRContextEncoder,
DPRQuestionEncoder,
DPRReader,
ElectraForPreTraining,
FlaubertWithLMHeadModel,
        GPT2LMHeadModel,
LayoutLMForMaskedLM,
LxmertForPreTraining,
LxmertVisualFeatureEncoder,
OpenAIGPTLMHeadModel,
RobertaForMaskedLM,
RobertaForSequenceClassification,
        T5ForConditionalGeneration,
TransfoXLLMHeadModel,
XLMRobertaForMaskedLM,
XLMWithLMHeadModel,
XLNetLMHeadModel,
)
logging.set_verbosity_info()
MODEL_CLASSES = {
"bart": (
BartConfig,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
BartForConditionalGeneration,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
),
"bert": (
BertConfig,
TFBertForPreTraining,
BertForPreTraining,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"bert-large-uncased-whole-word-masking-finetuned-squad": (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"bert-large-cased-whole-word-masking-finetuned-squad": (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"bert-base-cased-finetuned-mrpc": (
BertConfig,
TFBertForSequenceClassification,
BertForSequenceClassification,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"dpr": (
DPRConfig,
TFDPRQuestionEncoder,
TFDPRContextEncoder,
TFDPRReader,
DPRQuestionEncoder,
DPRContextEncoder,
DPRReader,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
),
"gpt2": (
GPTaConfig,
TFGPTaLMHeadModel,
GPTaLMHeadModel,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"xlnet": (
XLNetConfig,
TFXLNetLMHeadModel,
XLNetLMHeadModel,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"xlm": (
XLMConfig,
TFXLMWithLMHeadModel,
XLMWithLMHeadModel,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"xlm-roberta": (
XLMRobertaConfig,
TFXLMRobertaForMaskedLM,
XLMRobertaForMaskedLM,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"transfo-xl": (
TransfoXLConfig,
TFTransfoXLLMHeadModel,
TransfoXLLMHeadModel,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"openai-gpt": (
OpenAIGPTConfig,
TFOpenAIGPTLMHeadModel,
OpenAIGPTLMHeadModel,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"roberta": (
RobertaConfig,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
RobertaForMaskedLM,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"layoutlm": (
LayoutLMConfig,
TFLayoutLMForMaskedLM,
LayoutLMForMaskedLM,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
),
"roberta-large-mnli": (
RobertaConfig,
TFRobertaForSequenceClassification,
RobertaForSequenceClassification,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"camembert": (
CamembertConfig,
TFCamembertForMaskedLM,
CamembertForMaskedLM,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"flaubert": (
FlaubertConfig,
TFFlaubertWithLMHeadModel,
FlaubertWithLMHeadModel,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"distilbert": (
DistilBertConfig,
TFDistilBertForMaskedLM,
DistilBertForMaskedLM,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"distilbert-base-distilled-squad": (
DistilBertConfig,
TFDistilBertForQuestionAnswering,
DistilBertForQuestionAnswering,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"lxmert": (
LxmertConfig,
TFLxmertForPreTraining,
LxmertForPreTraining,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"lxmert-visual-feature-encoder": (
LxmertConfig,
TFLxmertVisualFeatureEncoder,
LxmertVisualFeatureEncoder,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"ctrl": (
CTRLConfig,
TFCTRLLMHeadModel,
CTRLLMHeadModel,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"albert": (
AlbertConfig,
TFAlbertForPreTraining,
AlbertForPreTraining,
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"t5": (
TaConfig,
TFTaForConditionalGeneration,
TaForConditionalGeneration,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"electra": (
ElectraConfig,
TFElectraForPreTraining,
ElectraForPreTraining,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"wav2vec2": (
WavaVecaConfig,
TFWavaVecaModel,
WavaVecaModel,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
}
def convert_pt_checkpoint_to_tf(
    model_type, pytorch_checkpoint_path, config_file, tf_dump_path, compare_with_pt_model=False, use_cached_models=True
):
    if model_type not in MODEL_CLASSES:
        raise ValueError(f"Unrecognized model type, should be one of {list(MODEL_CLASSES.keys())}.")

    config_class, model_class, pt_model_class, aws_config_map = MODEL_CLASSES[model_type]

    # Initialise TF model
    if config_file in aws_config_map:
        config_file = cached_file(config_file, CONFIG_NAME, force_download=not use_cached_models)
    config = config_class.from_json_file(config_file)
    config.output_hidden_states = True
    config.output_attentions = True
    print(f"Building TensorFlow model from configuration: {config}")
    tf_model = model_class(config)

    # Load weights from tf checkpoint
    if pytorch_checkpoint_path in aws_config_map.keys():
        pytorch_checkpoint_path = cached_file(
            pytorch_checkpoint_path, WEIGHTS_NAME, force_download=not use_cached_models
        )
    # Load PyTorch checkpoint in tf2 model:
    tf_model = load_pytorch_checkpoint_in_tf2_model(tf_model, pytorch_checkpoint_path)

    if compare_with_pt_model:
        tfo = tf_model(tf_model.dummy_inputs, training=False)  # build the network

        state_dict = torch.load(pytorch_checkpoint_path, map_location="cpu")
        pt_model = pt_model_class.from_pretrained(
            pretrained_model_name_or_path=None, config=config, state_dict=state_dict
        )

        with torch.no_grad():
            pto = pt_model(**pt_model.dummy_inputs)

        np_pt = pto[0].numpy()
        np_tf = tfo[0].numpy()
        diff = np.amax(np.abs(np_pt - np_tf))
        print(f"Max absolute difference between models outputs {diff}")
        assert diff <= 2e-2, f"Error, model absolute difference is >2e-2: {diff}"

    # Save pytorch-model
    print(f"Save TensorFlow model to {tf_dump_path}")
    tf_model.save_weights(tf_dump_path, save_format="h5")
def convert_all_pt_checkpoints_to_tf(
    args_model_type,
    tf_dump_path,
    model_shortcut_names_or_path=None,
    config_shortcut_names_or_path=None,
    compare_with_pt_model=False,
    use_cached_models=False,
    remove_cached_files=False,
    only_convert_finetuned_models=False,
):
    if args_model_type is None:
        model_types = list(MODEL_CLASSES.keys())
    else:
        model_types = [args_model_type]

    for j, model_type in enumerate(model_types, start=1):
        print("=" * 100)
        print(f" Converting model type {j}/{len(model_types)}: {model_type}")
        print("=" * 100)
        if model_type not in MODEL_CLASSES:
            raise ValueError(f"Unrecognized model type {model_type}, should be one of {list(MODEL_CLASSES.keys())}.")

        config_class, model_class, pt_model_class, aws_model_maps, aws_config_map = MODEL_CLASSES[model_type]

        if model_shortcut_names_or_path is None:
            model_shortcut_names_or_path = list(aws_model_maps.keys())
        if config_shortcut_names_or_path is None:
            config_shortcut_names_or_path = model_shortcut_names_or_path

        for i, (model_shortcut_name, config_shortcut_name) in enumerate(
            zip(model_shortcut_names_or_path, config_shortcut_names_or_path), start=1
        ):
            print("-" * 100)
            if "-squad" in model_shortcut_name or "-mrpc" in model_shortcut_name or "-mnli" in model_shortcut_name:
                if not only_convert_finetuned_models:
                    print(f"    Skipping finetuned checkpoint {model_shortcut_name}")
                    continue
                model_type = model_shortcut_name
            elif only_convert_finetuned_models:
                print(f"    Skipping not finetuned checkpoint {model_shortcut_name}")
                continue
            print(
                f"    Converting checkpoint {i}/{len(model_shortcut_names_or_path)}: {model_shortcut_name} - model_type {model_type}"
            )
            print("-" * 100)

            if config_shortcut_name in aws_config_map:
                config_file = cached_file(config_shortcut_name, CONFIG_NAME, force_download=not use_cached_models)
            else:
                config_file = config_shortcut_name

            if model_shortcut_name in aws_model_maps:
                model_file = cached_file(model_shortcut_name, WEIGHTS_NAME, force_download=not use_cached_models)
            else:
                model_file = model_shortcut_name

            if os.path.isfile(model_shortcut_name):
                model_shortcut_name = "converted_model"

            convert_pt_checkpoint_to_tf(
                model_type=model_type,
                pytorch_checkpoint_path=model_file,
                config_file=config_file,
                tf_dump_path=os.path.join(tf_dump_path, model_shortcut_name + "-tf_model.h5"),
                compare_with_pt_model=compare_with_pt_model,
            )
            if remove_cached_files:
                os.remove(config_file)
                os.remove(model_file)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_dump_path", default=None, type=str, required=True, help="Path to the output Tensorflow dump file."
)
parser.add_argument(
"--model_type",
default=None,
type=str,
help=(
F'''Model type selected in the list of {list(MODEL_CLASSES.keys())}. If not given, will download and '''
"convert all the models from AWS."
),
)
parser.add_argument(
"--pytorch_checkpoint_path",
default=None,
type=str,
help=(
"Path to the PyTorch checkpoint path or shortcut name to download from AWS. "
"If not given, will download and convert all the checkpoints from AWS."
),
)
parser.add_argument(
"--config_file",
default=None,
type=str,
help=(
"The config json file corresponding to the pre-trained model. \n"
"This specifies the model architecture. If not given and "
"--pytorch_checkpoint_path is not given or is a shortcut name "
"use the configuration associated to the shortcut name on the AWS"
),
)
parser.add_argument(
"--compare_with_pt_model", action="store_true", help="Compare Tensorflow and PyTorch model predictions."
)
parser.add_argument(
"--use_cached_models",
action="store_true",
help="Use cached models if possible instead of updating to latest checkpoint versions.",
)
parser.add_argument(
"--remove_cached_files",
action="store_true",
help="Remove pytorch models after conversion (save memory when converting in batches).",
)
parser.add_argument("--only_convert_finetuned_models", action="store_true", help="Only convert finetuned models.")
    args = parser.parse_args()
# if args.pytorch_checkpoint_path is not None:
# convert_pt_checkpoint_to_tf(args.model_type.lower(),
# args.pytorch_checkpoint_path,
# args.config_file if args.config_file is not None else args.pytorch_checkpoint_path,
# args.tf_dump_path,
# compare_with_pt_model=args.compare_with_pt_model,
# use_cached_models=args.use_cached_models)
# else:
convert_all_pt_checkpoints_to_tf(
args.model_type.lower() if args.model_type is not None else None,
args.tf_dump_path,
model_shortcut_names_or_path=[args.pytorch_checkpoint_path]
if args.pytorch_checkpoint_path is not None
else None,
config_shortcut_names_or_path=[args.config_file] if args.config_file is not None else None,
compare_with_pt_model=args.compare_with_pt_model,
use_cached_models=args.use_cached_models,
remove_cached_files=args.remove_cached_files,
only_convert_finetuned_models=args.only_convert_finetuned_models,
)
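# A sketch of invoking the converter from Python instead of the CLI
# (the shortcut name and dump path below are illustrative):
#
#   convert_all_pt_checkpoints_to_tf(
#       "bert",
#       "./tf_dump",
#       model_shortcut_names_or_path=["bert-base-uncased"],
#       compare_with_pt_model=False,
#       use_cached_models=True,
#   )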
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json"
    },
    "merges_file": {
        "allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"allegro/herbert-base-cased": 514}
PRETRAINED_INIT_CONFIGURATION = {}
class SCREAMING_SNAKE_CASE__ ( _SCREAMING_SNAKE_CASE ):
A : int = VOCAB_FILES_NAMES
A : Any = PRETRAINED_VOCAB_FILES_MAP
A : Optional[int] = PRETRAINED_INIT_CONFIGURATION
A : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A : str = HerbertTokenizer
def __init__( self : str , _lowerCAmelCase : Tuple=None , _lowerCAmelCase : List[str]=None , _lowerCAmelCase : List[Any]=None , _lowerCAmelCase : Optional[Any]="<s>" , _lowerCAmelCase : Optional[int]="<unk>" , _lowerCAmelCase : List[Any]="<pad>" , _lowerCAmelCase : Dict="<mask>" , _lowerCAmelCase : Optional[int]="</s>" , **_lowerCAmelCase : List[str] , ):
super().__init__(
_lowerCAmelCase , _lowerCAmelCase , tokenizer_file=_lowerCAmelCase , cls_token=_lowerCAmelCase , unk_token=_lowerCAmelCase , pad_token=_lowerCAmelCase , mask_token=_lowerCAmelCase , sep_token=_lowerCAmelCase , **_lowerCAmelCase , )
def snake_case__ ( self : List[str] , _lowerCAmelCase : List[int] , _lowerCAmelCase : Optional[List[int]] = None ):
__snake_case : Union[str, Any] = [self.cls_token_id]
__snake_case : Dict = [self.sep_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def snake_case__ ( self : Optional[Any] , _lowerCAmelCase : List[int] , _lowerCAmelCase : Optional[List[int]] = None , _lowerCAmelCase : bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_lowerCAmelCase , token_ids_a=_lowerCAmelCase , already_has_special_tokens=_lowerCAmelCase )
if token_ids_a is None:
return [1] + ([0] * len(_lowerCAmelCase )) + [1]
return [1] + ([0] * len(_lowerCAmelCase )) + [1] + ([0] * len(_lowerCAmelCase )) + [1]
def snake_case__ ( self : Optional[Any] , _lowerCAmelCase : List[int] , _lowerCAmelCase : Optional[List[int]] = None ):
__snake_case : Tuple = [self.sep_token_id]
__snake_case : Optional[int] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def snake_case__ ( self : List[Any] , _lowerCAmelCase : str , _lowerCAmelCase : Optional[str] = None ):
__snake_case : Optional[Any] = self._tokenizer.model.save(_lowerCAmelCase , name=_lowerCAmelCase )
return tuple(_lowerCAmelCase )
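
if __name__ == "__main__":
    # Usage sketch (an assumption, not part of the module above): the released
    # `transformers` package exposes the equivalent public class under the name
    # below; the sentence is illustrative.
    from transformers import HerbertTokenizerFast

    herbert = HerbertTokenizerFast.from_pretrained("allegro/herbert-base-cased")
    print(herbert("Kto czyta, nie błądzi.")["input_ids"])  # ids framed by <s> ... </s>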
| 350 | import random
def _partition(data: list, pivot) -> tuple:
    """Three-way partition of *data* around *pivot*: (less, equal, greater)."""
    less, equal, greater = [], [], []
    for element in data:
        if element < pivot:
            less.append(element)
        elif element > pivot:
            greater.append(element)
        else:
            equal.append(element)
    return less, equal, greater


def quick_select(items: list, index: int):
    """Return the *index*-th smallest element of *items* (0-based), or None for invalid input."""
    # index = len(items) // 2 when trying to find the median
    #   (value of index when items is sorted)

    # invalid input
    if index >= len(items) or index < 0:
        return None

    pivot = items[random.randint(0, len(items) - 1)]
    count = 0
    smaller, equal, larger = _partition(items, pivot)
    count = len(equal)
    m = len(smaller)

    # index is the pivot
    if m <= index < m + count:
        return pivot
    # must be in smaller
    elif m > index:
        return quick_select(smaller, index)
    # must be in larger
    else:
        return quick_select(larger, index - (m + count))
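
if __name__ == "__main__":
    # Usage sketch (illustrative data): the 0-based 3rd smallest value,
    # i.e. the median of six elements.
    items = [7, 2, 9, 4, 1, 8]
    print(quick_select(items, len(items) // 2))  # prints 7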
| 20 | 0 |
import argparse
import logging
import pickle
import random
import time

import numpy as np

from transformers import BertTokenizer, GPT2Tokenizer, RobertaTokenizer


logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
logger = logging.getLogger(__name__)


def main():
    """Tokenize a raw-text dump once and store the resulting token ids as a pickle."""
    parser = argparse.ArgumentParser(
        description="Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids)."
    )
    parser.add_argument("--file_path", type=str, default="data/dump.txt", help="The path to the data.")
    parser.add_argument("--tokenizer_type", type=str, default="bert", choices=["bert", "roberta", "gpt2"])
    parser.add_argument("--tokenizer_name", type=str, default="bert-base-uncased", help="The tokenizer to use.")
    parser.add_argument("--dump_file", type=str, default="data/dump", help="The dump file prefix.")
    args = parser.parse_args()

    logger.info(f"Loading Tokenizer ({args.tokenizer_name})")
    if args.tokenizer_type == "bert":
        tokenizer = BertTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["cls_token"]  # `[CLS]`
        sep = tokenizer.special_tokens_map["sep_token"]  # `[SEP]`
    elif args.tokenizer_type == "roberta":
        tokenizer = RobertaTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["cls_token"]  # `<s>`
        sep = tokenizer.special_tokens_map["sep_token"]  # `</s>`
    elif args.tokenizer_type == "gpt2":
        tokenizer = GPT2Tokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["bos_token"]  # `<|endoftext|>`
        sep = tokenizer.special_tokens_map["eos_token"]  # `<|endoftext|>`

    logger.info(f"Loading text from {args.file_path}")
    with open(args.file_path, "r", encoding="utf8") as fp:
        data = fp.readlines()

    logger.info("Start encoding")
    logger.info(f"{len(data)} examples to process.")

    rslt = []
    iter = 0
    interval = 10000
    start = time.time()
    for text in data:
        text = f"{bos} {text.strip()} {sep}"
        token_ids = tokenizer.encode(text, add_special_tokens=False)
        rslt.append(token_ids)

        iter += 1
        if iter % interval == 0:
            end = time.time()
            logger.info(f"{iter} examples processed. - {(end-start):.2f}s/{interval}expl")
            start = time.time()
    logger.info("Finished binarization")
    logger.info(f"{len(data)} examples processed.")

    dp_file = f"{args.dump_file}.{args.tokenizer_name}.pickle"
    vocab_size = tokenizer.vocab_size
    if vocab_size < (1 << 16):
        rslt_ = [np.uint16(d) for d in rslt]  # ids below 65_536 fit in 2 bytes each
    else:
        rslt_ = [np.int32(d) for d in rslt]
    random.shuffle(rslt_)
    logger.info(f"Dump to {dp_file}")
    with open(dp_file, "wb") as handle:
        pickle.dump(rslt_, handle, protocol=pickle.HIGHEST_PROTOCOL)


if __name__ == "__main__":
    main()
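
# Reading a dump back (a sketch; the file name follows the default CLI
# arguments above, so adjust it to your --dump_file / --tokenizer_name):
#
#     with open("data/dump.bert-base-uncased.pickle", "rb") as f:
#         sequences = pickle.load(f)
#     # `sequences` is a shuffled list of np.uint16 / np.int32 token-id arrays.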
| 351 | import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer
lowercase_ = logging.get_logger(__name__)
lowercase_ = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
lowercase_ = {
"vocab_file": {
"junnyu/roformer_chinese_small": "https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt",
"junnyu/roformer_chinese_base": "https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt",
"junnyu/roformer_chinese_char_small": (
"https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt"
),
"junnyu/roformer_chinese_char_base": (
"https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt"
),
"junnyu/roformer_small_discriminator": (
"https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt"
),
"junnyu/roformer_small_generator": (
"https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt"
),
}
}
lowercase_ = {
"junnyu/roformer_chinese_small": 15_36,
"junnyu/roformer_chinese_base": 15_36,
"junnyu/roformer_chinese_char_small": 5_12,
"junnyu/roformer_chinese_char_base": 5_12,
"junnyu/roformer_small_discriminator": 1_28,
"junnyu/roformer_small_generator": 1_28,
}
lowercase_ = {
"junnyu/roformer_chinese_small": {"do_lower_case": True},
"junnyu/roformer_chinese_base": {"do_lower_case": True},
"junnyu/roformer_chinese_char_small": {"do_lower_case": True},
"junnyu/roformer_chinese_char_base": {"do_lower_case": True},
"junnyu/roformer_small_discriminator": {"do_lower_case": True},
"junnyu/roformer_small_generator": {"do_lower_case": True},
}
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase ):
A : Optional[int] = VOCAB_FILES_NAMES
A : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
A : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A : int = PRETRAINED_INIT_CONFIGURATION
A : List[str] = RoFormerTokenizer
def __init__( self : Optional[Any] , _lowerCAmelCase : Dict=None , _lowerCAmelCase : List[Any]=None , _lowerCAmelCase : List[Any]=True , _lowerCAmelCase : Any="[UNK]" , _lowerCAmelCase : int="[SEP]" , _lowerCAmelCase : Optional[int]="[PAD]" , _lowerCAmelCase : Optional[int]="[CLS]" , _lowerCAmelCase : Optional[Any]="[MASK]" , _lowerCAmelCase : Optional[Any]=True , _lowerCAmelCase : Optional[Any]=None , **_lowerCAmelCase : Dict , ):
super().__init__(
_lowerCAmelCase , tokenizer_file=_lowerCAmelCase , do_lower_case=_lowerCAmelCase , unk_token=_lowerCAmelCase , sep_token=_lowerCAmelCase , pad_token=_lowerCAmelCase , cls_token=_lowerCAmelCase , mask_token=_lowerCAmelCase , tokenize_chinese_chars=_lowerCAmelCase , strip_accents=_lowerCAmelCase , **_lowerCAmelCase , )
__snake_case : Dict = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
pre_tok_state.get("""lowercase""" , _lowerCAmelCase ) != do_lower_case
or pre_tok_state.get("""strip_accents""" , _lowerCAmelCase ) != strip_accents
):
__snake_case : Tuple = getattr(_lowerCAmelCase , pre_tok_state.pop("""type""" ) )
__snake_case : List[Any] = do_lower_case
__snake_case : Optional[Any] = strip_accents
__snake_case : List[str] = pre_tok_class(**_lowerCAmelCase )
__snake_case : Optional[Any] = do_lower_case
def __getstate__( self : Optional[Any] ):
__snake_case : Optional[int] = self.__dict__.copy()
__snake_case : Optional[Any] = BertPreTokenizer()
return state
def __setstate__( self : str , _lowerCAmelCase : Dict ):
__snake_case : str = d
__snake_case : int = self.__dict__["""_tokenizer"""].get_vocab()
__snake_case : List[str] = PreTokenizer.custom(JiebaPreTokenizer(_lowerCAmelCase ) )
def snake_case__ ( self : Union[str, Any] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Optional[int]=None ):
__snake_case : Dict = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def snake_case__ ( self : Union[str, Any] , _lowerCAmelCase : List[int] , _lowerCAmelCase : Optional[List[int]] = None ):
__snake_case : Optional[int] = [self.sep_token_id]
__snake_case : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def snake_case__ ( self : Optional[int] , _lowerCAmelCase : str , _lowerCAmelCase : Optional[str] = None ):
__snake_case : int = self._tokenizer.model.save(_lowerCAmelCase , name=_lowerCAmelCase )
return tuple(_lowerCAmelCase )
def snake_case__ ( self : int , _lowerCAmelCase : int , _lowerCAmelCase : Union[str, Any]=None , _lowerCAmelCase : Tuple=None , _lowerCAmelCase : Union[str, Any]=False , **_lowerCAmelCase : Tuple , ):
__snake_case : Tuple = BertPreTokenizer()
return super().save_pretrained(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , **_lowerCAmelCase )
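
if __name__ == "__main__":
    # Usage sketch (assumptions: an installed `transformers` release exposing
    # RoFormerTokenizerFast, plus the `rjieba` package backing the jieba
    # pre-tokenizer; the sentence is illustrative).
    from transformers import RoFormerTokenizerFast

    roformer_tok = RoFormerTokenizerFast.from_pretrained("junnyu/roformer_chinese_base")
    print(roformer_tok.tokenize("今天天气非常好。"))  # jieba-segmented word pieces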
| 20 | 0 |
from ...processing_utils import ProcessorMixin
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase ):
A : int = "WhisperFeatureExtractor"
A : Optional[Any] = "WhisperTokenizer"
def __init__( self : str , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Optional[Any] ):
super().__init__(_lowerCAmelCase , _lowerCAmelCase )
__snake_case : Dict = self.feature_extractor
__snake_case : List[Any] = False
def snake_case__ ( self : Union[str, Any] , _lowerCAmelCase : Optional[Any]=None , _lowerCAmelCase : Union[str, Any]=None , _lowerCAmelCase : str=True ):
return self.tokenizer.get_decoder_prompt_ids(task=_lowerCAmelCase , language=_lowerCAmelCase , no_timestamps=_lowerCAmelCase )
def __call__( self : str , *_lowerCAmelCase : Union[str, Any] , **_lowerCAmelCase : str ):
if self._in_target_context_manager:
return self.current_processor(*_lowerCAmelCase , **_lowerCAmelCase )
__snake_case : List[Any] = kwargs.pop("""audio""" , _lowerCAmelCase )
__snake_case : List[str] = kwargs.pop("""sampling_rate""" , _lowerCAmelCase )
__snake_case : Dict = kwargs.pop("""text""" , _lowerCAmelCase )
if len(_lowerCAmelCase ) > 0:
__snake_case : int = args[0]
__snake_case : Optional[Any] = args[1:]
if audio is None and text is None:
raise ValueError("""You need to specify either an `audio` or `text` input to process.""" )
if audio is not None:
__snake_case : Union[str, Any] = self.feature_extractor(_lowerCAmelCase , *_lowerCAmelCase , sampling_rate=_lowerCAmelCase , **_lowerCAmelCase )
if text is not None:
__snake_case : Optional[int] = self.tokenizer(_lowerCAmelCase , **_lowerCAmelCase )
if text is None:
return inputs
elif audio is None:
return encodings
else:
__snake_case : Union[str, Any] = encodings["""input_ids"""]
return inputs
def snake_case__ ( self : Union[str, Any] , *_lowerCAmelCase : Optional[int] , **_lowerCAmelCase : str ):
return self.tokenizer.batch_decode(*_lowerCAmelCase , **_lowerCAmelCase )
def snake_case__ ( self : Optional[Any] , *_lowerCAmelCase : Tuple , **_lowerCAmelCase : Optional[int] ):
return self.tokenizer.decode(*_lowerCAmelCase , **_lowerCAmelCase )
def snake_case__ ( self : Dict , _lowerCAmelCase : str , _lowerCAmelCase : List[Any]="np" ):
return self.tokenizer.get_prompt_ids(_lowerCAmelCase , return_tensors=_lowerCAmelCase )
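
if __name__ == "__main__":
    # Usage sketch (assumptions: an installed `transformers` release and the
    # public "openai/whisper-tiny" checkpoint; the waveform is one second of
    # silence, used purely for illustration).
    import numpy as np
    from transformers import WhisperProcessor

    processor = WhisperProcessor.from_pretrained("openai/whisper-tiny")
    batch = processor(audio=np.zeros(16_000, dtype=np.float32), sampling_rate=16_000, text="hello world")
    print(batch.keys())  # audio features from the feature extractor plus tokenized labels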
| 352 | from __future__ import annotations
import math
def minimax(depth: int, node_index: int, is_max: bool, scores: list[int], height: float) -> int:
    """Return the optimal score reachable from *node_index* via the minimax algorithm."""
    if depth < 0:
        raise ValueError("Depth cannot be less than 0")
    if len(scores) == 0:
        raise ValueError("Scores cannot be empty")
    if depth == height:
        return scores[node_index]
    if is_max:
        return max(
            minimax(depth + 1, node_index * 2, False, scores, height),
            minimax(depth + 1, node_index * 2 + 1, False, scores, height),
        )
    return min(
        minimax(depth + 1, node_index * 2, True, scores, height),
        minimax(depth + 1, node_index * 2 + 1, True, scores, height),
    )


def main() -> None:
    scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    height = math.log(len(scores), 2)
    print("Optimal value : ", end="")
    print(minimax(0, 0, True, scores, height))
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 20 | 0 |
from __future__ import annotations
def pigeon_sort(array: list[int]) -> list[int]:
    """Sort *array* in place with pigeonhole sort and return it."""
    if len(array) == 0:
        return array
    _min, _max = min(array), max(array)
    # Compute the variables
    holes_range = _max - _min + 1
    holes, holes_repeat = [0] * holes_range, [0] * holes_range
    # Make the sorting.
    for i in array:
        index = i - _min
        holes[index] = i
        holes_repeat[index] += 1
    # Makes the array back by replacing the numbers.
    index = 0
    for i in range(holes_range):
        while holes_repeat[i] > 0:
            array[index] = holes[i]
            index += 1
            holes_repeat[i] -= 1
    # Returns the sorted array.
    return array
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input("Enter numbers separated by comma:\n")
    unsorted = [int(x) for x in user_input.split(",")]
print(pigeon_sort(unsorted))
| 353 | import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
import PIL.Image
from .features import FeatureType
_IMAGE_COMPRESSION_FORMATS = None
_NATIVE_BYTEORDER = "<" if sys.byteorder == "little" else ">"
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image
_VALID_IMAGE_ARRAY_DTPYES = [
np.dtype("|b1"),
np.dtype("|u1"),
np.dtype("<u2"),
np.dtype(">u2"),
np.dtype("<i2"),
np.dtype(">i2"),
np.dtype("<u4"),
np.dtype(">u4"),
np.dtype("<i4"),
np.dtype(">i4"),
np.dtype("<f4"),
np.dtype(">f4"),
np.dtype("<f8"),
np.dtype(">f8"),
]
@dataclass
class SCREAMING_SNAKE_CASE__ :
A : bool = True
A : Optional[str] = None
# Automatically constructed
A : ClassVar[str] = "PIL.Image.Image"
A : ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()} )
A : str = field(default="Image" , init=__UpperCamelCase , repr=__UpperCamelCase )
def __call__( self : Any ):
return self.pa_type
def snake_case__ ( self : List[Any] , _lowerCAmelCase : Union[str, bytes, dict, np.ndarray, "PIL.Image.Image"] ):
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("""To support encoding images, please install 'Pillow'.""" )
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
__snake_case : str = np.array(_lowerCAmelCase )
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
return {"path": value, "bytes": None}
elif isinstance(_lowerCAmelCase , _lowerCAmelCase ):
return {"path": None, "bytes": value}
elif isinstance(_lowerCAmelCase , np.ndarray ):
# convert the image array to PNG/TIFF bytes
return encode_np_array(_lowerCAmelCase )
elif isinstance(_lowerCAmelCase , PIL.Image.Image ):
# convert the PIL image to bytes (default format is PNG/TIFF)
return encode_pil_image(_lowerCAmelCase )
elif value.get("""path""" ) is not None and os.path.isfile(value["""path"""] ):
# we set "bytes": None to not duplicate the data if they're already available locally
return {"bytes": None, "path": value.get("""path""" )}
elif value.get("""bytes""" ) is not None or value.get("""path""" ) is not None:
# store the image bytes, and path is used to infer the image format using the file extension
return {"bytes": value.get("""bytes""" ), "path": value.get("""path""" )}
else:
raise ValueError(
f'''An image sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.''' )
def snake_case__ ( self : List[str] , _lowerCAmelCase : dict , _lowerCAmelCase : Dict=None ):
if not self.decode:
raise RuntimeError("""Decoding is disabled for this feature. Please use Image(decode=True) instead.""" )
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("""To support decoding images, please install 'Pillow'.""" )
if token_per_repo_id is None:
__snake_case : Tuple = {}
__snake_case , __snake_case : str = value["""path"""], value["""bytes"""]
if bytes_ is None:
if path is None:
raise ValueError(f'''An image should have one of \'path\' or \'bytes\' but both are None in {value}.''' )
else:
if is_local_path(_lowerCAmelCase ):
__snake_case : str = PIL.Image.open(_lowerCAmelCase )
else:
__snake_case : List[str] = path.split("""::""" )[-1]
try:
__snake_case : Dict = string_to_dict(_lowerCAmelCase , config.HUB_DATASETS_URL )["""repo_id"""]
__snake_case : int = token_per_repo_id.get(_lowerCAmelCase )
except ValueError:
__snake_case : List[Any] = None
with xopen(_lowerCAmelCase , """rb""" , use_auth_token=_lowerCAmelCase ) as f:
__snake_case : Union[str, Any] = BytesIO(f.read() )
__snake_case : Dict = PIL.Image.open(bytes_ )
else:
__snake_case : Optional[Any] = PIL.Image.open(BytesIO(bytes_ ) )
image.load() # to avoid "Too many open files" errors
return image
def snake_case__ ( self : Union[str, Any] ):
from .features import Value
return (
self
if self.decode
else {
"bytes": Value("""binary""" ),
"path": Value("""string""" ),
}
)
def snake_case__ ( self : Optional[int] , _lowerCAmelCase : Union[pa.StringArray, pa.StructArray, pa.ListArray] ):
if pa.types.is_string(storage.type ):
__snake_case : Optional[Any] = pa.array([None] * len(_lowerCAmelCase ) , type=pa.binary() )
__snake_case : Any = pa.StructArray.from_arrays([bytes_array, storage] , ["""bytes""", """path"""] , mask=storage.is_null() )
elif pa.types.is_binary(storage.type ):
__snake_case : Optional[Any] = pa.array([None] * len(_lowerCAmelCase ) , type=pa.string() )
__snake_case : List[str] = pa.StructArray.from_arrays([storage, path_array] , ["""bytes""", """path"""] , mask=storage.is_null() )
elif pa.types.is_struct(storage.type ):
if storage.type.get_field_index("""bytes""" ) >= 0:
__snake_case : List[str] = storage.field("""bytes""" )
else:
__snake_case : List[Any] = pa.array([None] * len(_lowerCAmelCase ) , type=pa.binary() )
if storage.type.get_field_index("""path""" ) >= 0:
__snake_case : Optional[int] = storage.field("""path""" )
else:
__snake_case : int = pa.array([None] * len(_lowerCAmelCase ) , type=pa.string() )
__snake_case : Tuple = pa.StructArray.from_arrays([bytes_array, path_array] , ["""bytes""", """path"""] , mask=storage.is_null() )
elif pa.types.is_list(storage.type ):
__snake_case : Optional[Any] = pa.array(
[encode_np_array(np.array(_lowerCAmelCase ) )["""bytes"""] if arr is not None else None for arr in storage.to_pylist()] , type=pa.binary() , )
__snake_case : Optional[int] = pa.array([None] * len(_lowerCAmelCase ) , type=pa.string() )
__snake_case : List[str] = pa.StructArray.from_arrays(
[bytes_array, path_array] , ["""bytes""", """path"""] , mask=bytes_array.is_null() )
return array_cast(_lowerCAmelCase , self.pa_type )
def snake_case__ ( self : Union[str, Any] , _lowerCAmelCase : pa.StructArray ):
@no_op_if_value_is_null
def path_to_bytes(_lowerCAmelCase : Tuple ):
with xopen(_lowerCAmelCase , """rb""" ) as f:
__snake_case : Optional[int] = f.read()
return bytes_
__snake_case : Tuple = pa.array(
[
(path_to_bytes(x["""path"""] ) if x["""bytes"""] is None else x["""bytes"""]) if x is not None else None
for x in storage.to_pylist()
] , type=pa.binary() , )
__snake_case : Optional[Any] = pa.array(
[os.path.basename(_lowerCAmelCase ) if path is not None else None for path in storage.field("""path""" ).to_pylist()] , type=pa.string() , )
__snake_case : Any = pa.StructArray.from_arrays([bytes_array, path_array] , ["""bytes""", """path"""] , mask=bytes_array.is_null() )
return array_cast(_lowerCAmelCase , self.pa_type )
def list_image_compression_formats() -> list:
    """Return the image formats Pillow can both open and save."""
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")
    global _IMAGE_COMPRESSION_FORMATS
    if _IMAGE_COMPRESSION_FORMATS is None:
        PIL.Image.init()
        _IMAGE_COMPRESSION_FORMATS = list(set(PIL.Image.OPEN.keys()) & set(PIL.Image.SAVE.keys()))
    return _IMAGE_COMPRESSION_FORMATS


def image_to_bytes(image: "PIL.Image.Image") -> bytes:
    """Serialize a PIL image to bytes, keeping its original format when possible."""
    buffer = BytesIO()
    if image.format in list_image_compression_formats():
        format = image.format
    else:
        format = "PNG" if image.mode in ["1", "L", "LA", "RGB", "RGBA"] else "TIFF"
    image.save(buffer, format=format)
    return buffer.getvalue()


def encode_pil_image(image: "PIL.Image.Image") -> dict:
    """Encode a PIL image as the {"path", "bytes"} storage struct."""
    if hasattr(image, "filename") and image.filename != "":
        return {"path": image.filename, "bytes": None}
    else:
        return {"path": None, "bytes": image_to_bytes(image)}


def encode_np_array(array: np.ndarray) -> dict:
    """Encode a NumPy array as the {"path", "bytes"} storage struct, downcasting to a Pillow-compatible dtype."""
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")
    dtype = array.dtype
    dtype_byteorder = dtype.byteorder if dtype.byteorder != "=" else _NATIVE_BYTEORDER
    dtype_kind = dtype.kind
    dtype_itemsize = dtype.itemsize
    dest_dtype = None
    # Multi-channel array case (only np.dtype("|u1") is allowed)
    if array.shape[2:]:
        dest_dtype = np.dtype("|u1")
        if dtype_kind not in ["u", "i"]:
            raise TypeError(
                f"Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays."
            )
        if dtype is not dest_dtype:
            warnings.warn(f"Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'")
    # Exact match
    elif dtype in _VALID_IMAGE_ARRAY_DTPYES:
        dest_dtype = dtype
    else:  # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
        while dtype_itemsize >= 1:
            dest_dtype_str = dtype_byteorder + dtype_kind + str(dtype_itemsize)
            dest_dtype = np.dtype(dest_dtype_str)
            if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES:
                warnings.warn(f"Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'")
                break
            else:
                dtype_itemsize //= 2
        if dest_dtype is None:
            raise TypeError(
                f"Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}"
            )
    image = PIL.Image.fromarray(array.astype(dest_dtype))
    return {"path": None, "bytes": image_to_bytes(image)}


def objects_to_list_of_image_dicts(
    objs: Union[List[str], List[dict], List[np.ndarray], List["PIL.Image.Image"]],
) -> List[dict]:
    """Encode a possibly-heterogeneous list of image objects as storage structs."""
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")
    if objs:
        _, obj = first_non_null_value(objs)
        if isinstance(obj, str):
            return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
        if isinstance(obj, np.ndarray):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_np_array)
            return [obj_to_image_dict_func(obj) for obj in objs]
        elif isinstance(obj, PIL.Image.Image):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_pil_image)
            return [obj_to_image_dict_func(obj) for obj in objs]
        else:
            return objs
    else:
        return objs
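
if __name__ == "__main__":
    # Round-trip sketch (an assumption: the released `datasets` package, whose
    # public `Image` feature corresponds to the class above; array values are
    # illustrative).
    from datasets import Image as ImageFeature

    feature = ImageFeature()
    encoded = feature.encode_example(np.zeros((4, 4, 3), dtype=np.uint8))  # -> {"bytes": ..., "path": None}
    decoded = feature.decode_example(encoded)  # -> a PIL.Image.Image
    print(decoded.size, decoded.mode)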
| 20 | 0 |
import unittest
from transformers import DonutProcessor
lowercase_ = "naver-clova-ix/donut-base"
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def snake_case__ ( self : Optional[int] ):
__snake_case : Tuple = DonutProcessor.from_pretrained(UpperCamelCase_ )
def snake_case__ ( self : Optional[Any] ):
__snake_case : List[str] = {
"""name""": """John Doe""",
"""age""": """99""",
"""city""": """Atlanta""",
"""state""": """GA""",
"""zip""": """30301""",
"""phone""": """123-4567""",
"""nicknames""": [{"""nickname""": """Johnny"""}, {"""nickname""": """JD"""}],
}
__snake_case : List[str] = (
"""<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>"""
"""<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>"""
"""<s_nicknames><s_nickname>Johnny</s_nickname>"""
"""<sep/><s_nickname>JD</s_nickname></s_nicknames>"""
)
__snake_case : Union[str, Any] = self.processor.tokenajson(UpperCamelCase_ )
self.assertDictEqual(UpperCamelCase_ , UpperCamelCase_ )
| 354 | from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING
lowercase_ = logging.get_logger(__name__)
@add_end_docstrings(__UpperCamelCase )
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase ):
def __init__( self : Dict , *_lowerCAmelCase : Dict , **_lowerCAmelCase : int ):
super().__init__(*_lowerCAmelCase , **_lowerCAmelCase )
requires_backends(self , """vision""" )
self.check_model_type(
TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == """tf""" else MODEL_FOR_VISION_2_SEQ_MAPPING )
def snake_case__ ( self : List[Any] , _lowerCAmelCase : List[str]=None , _lowerCAmelCase : Any=None , _lowerCAmelCase : List[str]=None ):
__snake_case : Optional[Any] = {}
__snake_case : int = {}
if prompt is not None:
__snake_case : Dict = prompt
if generate_kwargs is not None:
__snake_case : List[Any] = generate_kwargs
if max_new_tokens is not None:
if "generate_kwargs" not in forward_kwargs:
__snake_case : Optional[int] = {}
if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
raise ValueError(
"""'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter,"""
""" please use only one""" )
__snake_case : Any = max_new_tokens
return preprocess_params, forward_kwargs, {}
def __call__( self : Optional[Any] , _lowerCAmelCase : Union[str, List[str], "Image.Image", List["Image.Image"]] , **_lowerCAmelCase : Union[str, Any] ):
return super().__call__(_lowerCAmelCase , **_lowerCAmelCase )
def snake_case__ ( self : Optional[Any] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : List[str]=None ):
__snake_case : Optional[Any] = load_image(_lowerCAmelCase )
if prompt is not None:
if not isinstance(_lowerCAmelCase , _lowerCAmelCase ):
raise ValueError(
f'''Received an invalid text input, got - {type(_lowerCAmelCase )} - but expected a single string. '''
"""Note also that one single text can be provided for conditional image to text generation.""" )
__snake_case : Tuple = self.model.config.model_type
if model_type == "git":
__snake_case : Tuple = self.image_processor(images=_lowerCAmelCase , return_tensors=self.framework )
__snake_case : Any = self.tokenizer(text=_lowerCAmelCase , add_special_tokens=_lowerCAmelCase ).input_ids
__snake_case : Tuple = [self.tokenizer.cls_token_id] + input_ids
__snake_case : int = torch.tensor(_lowerCAmelCase ).unsqueeze(0 )
model_inputs.update({"""input_ids""": input_ids} )
elif model_type == "pix2struct":
__snake_case : Dict = self.image_processor(images=_lowerCAmelCase , header_text=_lowerCAmelCase , return_tensors=self.framework )
elif model_type != "vision-encoder-decoder":
# vision-encoder-decoder does not support conditional generation
__snake_case : int = self.image_processor(images=_lowerCAmelCase , return_tensors=self.framework )
__snake_case : Optional[Any] = self.tokenizer(_lowerCAmelCase , return_tensors=self.framework )
model_inputs.update(_lowerCAmelCase )
else:
raise ValueError(f'''Model type {model_type} does not support conditional text generation''' )
else:
__snake_case : Tuple = self.image_processor(images=_lowerCAmelCase , return_tensors=self.framework )
if self.model.config.model_type == "git" and prompt is None:
__snake_case : int = None
return model_inputs
def snake_case__ ( self : Union[str, Any] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : str=None ):
# Git model sets `model_inputs["input_ids"] = None` in `preprocess` (when `prompt=None`). In batch model, the
# pipeline will group them into a list of `None`, which fail `_forward`. Avoid this by checking it first.
if (
"input_ids" in model_inputs
and isinstance(model_inputs["""input_ids"""] , _lowerCAmelCase )
and all(x is None for x in model_inputs["""input_ids"""] )
):
__snake_case : List[Any] = None
if generate_kwargs is None:
__snake_case : Dict = {}
# FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
# parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
# the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
# in the `_prepare_model_inputs` method.
__snake_case : Dict = model_inputs.pop(self.model.main_input_name )
__snake_case : Optional[int] = self.model.generate(_lowerCAmelCase , **_lowerCAmelCase , **_lowerCAmelCase )
return model_outputs
def snake_case__ ( self : List[Any] , _lowerCAmelCase : str ):
__snake_case : Union[str, Any] = []
for output_ids in model_outputs:
__snake_case : Union[str, Any] = {
"""generated_text""": self.tokenizer.decode(
_lowerCAmelCase , skip_special_tokens=_lowerCAmelCase , )
}
records.append(_lowerCAmelCase )
return records
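
if __name__ == "__main__":
    # Usage sketch (assumptions: an installed `transformers` release and the
    # public "nlpconnect/vit-gpt2-image-captioning" checkpoint; the URL is a
    # test asset, not taken from this file).
    from transformers import pipeline

    captioner = pipeline("image-to-text", model="nlpconnect/vit-gpt2-image-captioning")
    print(captioner("https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png"))
    # -> [{"generated_text": "..."}]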
| 20 | 0 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
lowercase_ = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE__ ( BaseImageProcessor ):
A : Union[str, Any] = ["pixel_values"]
def __init__( self : List[str] , _lowerCAmelCase : Optional[Any] = True , _lowerCAmelCase : Tuple = None , _lowerCAmelCase : List[Any] = PIL.Image.BICUBIC , _lowerCAmelCase : Optional[int] = True , _lowerCAmelCase : Any = None , _lowerCAmelCase : Optional[int] = 1 / 2_55 , _lowerCAmelCase : Union[str, Any] = True , _lowerCAmelCase : Any = True , _lowerCAmelCase : Any = None , _lowerCAmelCase : int = None , **_lowerCAmelCase : Optional[Any] , ):
super().__init__(**__lowerCamelCase )
__snake_case : Union[str, Any] = size if size is not None else {'''height''': 2_56, '''width''': 2_56}
__snake_case : Dict = get_size_dict(__lowerCamelCase )
__snake_case : Dict = crop_size if crop_size is not None else {'''height''': 2_24, '''width''': 2_24}
__snake_case : Any = get_size_dict(__lowerCamelCase , param_name="""crop_size""" )
__snake_case : str = do_resize
__snake_case : Any = size
__snake_case : Optional[Any] = resample
__snake_case : Dict = do_center_crop
__snake_case : int = crop_size
__snake_case : List[Any] = do_rescale
__snake_case : Optional[Any] = rescale_factor
__snake_case : Union[str, Any] = do_normalize
__snake_case : Dict = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
__snake_case : Tuple = image_std if image_std is not None else IMAGENET_STANDARD_STD
def snake_case__ ( self : str , _lowerCAmelCase : Tuple , _lowerCAmelCase : int , _lowerCAmelCase : List[Any] = PIL.Image.BICUBIC , _lowerCAmelCase : Tuple = None , **_lowerCAmelCase : Any , ):
__snake_case : int = get_size_dict(__lowerCamelCase )
if "height" not in size or "width" not in size:
raise ValueError(f'''The size dictionary must have keys \'height\' and \'width\'. Got {size.keys()}''' )
return resize(
__lowerCamelCase , size=(size["""height"""], size["""width"""]) , resample=__lowerCamelCase , data_format=__lowerCamelCase , **__lowerCamelCase )
def snake_case__ ( self : Optional[int] , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Any , _lowerCAmelCase : Dict = None , **_lowerCAmelCase : Tuple , ):
__snake_case : List[Any] = get_size_dict(__lowerCamelCase )
if "height" not in size or "width" not in size:
raise ValueError(f'''The size dictionary must have keys \'height\' and \'width\'. Got {size.keys()}''' )
return center_crop(__lowerCamelCase , size=(size["""height"""], size["""width"""]) , data_format=__lowerCamelCase , **__lowerCamelCase )
def snake_case__ ( self : Any , _lowerCAmelCase : Any , _lowerCAmelCase : Dict , _lowerCAmelCase : Optional[int] = None , **_lowerCAmelCase : List[str] , ):
return rescale(__lowerCamelCase , scale=__lowerCamelCase , data_format=__lowerCamelCase , **__lowerCamelCase )
def snake_case__ ( self : Union[str, Any] , _lowerCAmelCase : int , _lowerCAmelCase : str , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : List[Any] = None , **_lowerCAmelCase : str , ):
return normalize(__lowerCamelCase , mean=__lowerCamelCase , std=__lowerCamelCase , data_format=__lowerCamelCase , **__lowerCamelCase )
def snake_case__ ( self : List[str] , _lowerCAmelCase : List[str] , _lowerCAmelCase : Any = None , _lowerCAmelCase : List[str] = None , _lowerCAmelCase : Union[str, Any]=None , _lowerCAmelCase : List[str] = None , _lowerCAmelCase : str = None , _lowerCAmelCase : Optional[int] = None , _lowerCAmelCase : Union[str, Any] = None , _lowerCAmelCase : str = None , _lowerCAmelCase : List[str] = None , _lowerCAmelCase : List[str] = None , _lowerCAmelCase : Optional[Any] = None , _lowerCAmelCase : Optional[int] = ChannelDimension.FIRST , **_lowerCAmelCase : List[Any] , ):
__snake_case : Union[str, Any] = do_resize if do_resize is not None else self.do_resize
__snake_case : List[Any] = resample if resample is not None else self.resample
__snake_case : List[str] = do_center_crop if do_center_crop is not None else self.do_center_crop
__snake_case : Dict = do_rescale if do_rescale is not None else self.do_rescale
__snake_case : Tuple = rescale_factor if rescale_factor is not None else self.rescale_factor
__snake_case : Dict = do_normalize if do_normalize is not None else self.do_normalize
__snake_case : List[str] = image_mean if image_mean is not None else self.image_mean
__snake_case : Optional[Any] = image_std if image_std is not None else self.image_std
__snake_case : str = size if size is not None else self.size
__snake_case : Optional[Any] = get_size_dict(__lowerCamelCase )
__snake_case : Optional[int] = crop_size if crop_size is not None else self.crop_size
__snake_case : Union[str, Any] = get_size_dict(__lowerCamelCase , param_name="""crop_size""" )
__snake_case : Optional[Any] = make_list_of_images(__lowerCamelCase )
if not valid_images(__lowerCamelCase ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None or resample is None:
raise ValueError("""Size and resample must be specified if do_resize is True.""" )
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# All transformations expect numpy arrays.
__snake_case : List[str] = [to_numpy_array(__lowerCamelCase ) for image in images]
if do_resize:
__snake_case : List[str] = [self.resize(image=__lowerCamelCase , size=__lowerCamelCase , resample=__lowerCamelCase ) for image in images]
if do_center_crop:
__snake_case : str = [self.center_crop(image=__lowerCamelCase , size=__lowerCamelCase ) for image in images]
if do_rescale:
__snake_case : Tuple = [self.rescale(image=__lowerCamelCase , scale=__lowerCamelCase ) for image in images]
if do_normalize:
__snake_case : List[Any] = [self.normalize(image=__lowerCamelCase , mean=__lowerCamelCase , std=__lowerCamelCase ) for image in images]
__snake_case : Optional[int] = [to_channel_dimension_format(__lowerCamelCase , __lowerCamelCase ) for image in images]
__snake_case : Optional[Any] = {'''pixel_values''': images}
return BatchFeature(data=__lowerCamelCase , tensor_type=__lowerCamelCase )
| 355 | from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
lowercase_ = {"configuration_yolos": ["YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP", "YolosConfig", "YolosOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ["YolosFeatureExtractor"]
lowercase_ = ["YolosImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
"YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST",
"YolosForObjectDetection",
"YolosModel",
"YolosPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_yolos import YolosFeatureExtractor
from .image_processing_yolos import YolosImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_yolos import (
YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST,
YolosForObjectDetection,
YolosModel,
YolosPreTrainedModel,
)
else:
import sys
lowercase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 20 | 0 |
from __future__ import annotations
from random import choice
def random_pivot(lst):
    """Choose a random pivot element from *lst*."""
    return choice(lst)


def kth_number(lst: list[int], k: int) -> int:
    """Return the k-th smallest element of *lst* (1-based) via randomized quickselect."""
    pivot = random_pivot(lst)

    # partition based on pivot
    # linear time
    small = [e for e in lst if e < pivot]
    big = [e for e in lst if e > pivot]

    # if we get lucky, pivot might be the element we want.
    # we can easily see this:
    # small (elements smaller than k)
    # + pivot (kth element)
    # + big (elements larger than k)
    if len(small) == k - 1:
        return pivot
    # pivot is in elements bigger than k
    elif len(small) < k - 1:
        return kth_number(big, k - len(small) - 1)
    # pivot is in elements smaller than k
    else:
        return kth_number(small, k)
if __name__ == "__main__":
import doctest
doctest.testmod()
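    # Usage sketch (illustrative data): the 3rd smallest element.
    print(kth_number([5, 1, 4, 2, 3], 3))  # prints 3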
| 356 | # Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.speechta import SpeechTaForTextToSpeech, SpeechTaHifiGan, SpeechTaProcessor
from ..utils import is_datasets_available
from .base import PipelineTool
if is_datasets_available():
from datasets import load_dataset
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase ):
A : Optional[int] = "microsoft/speecht5_tts"
A : List[Any] = (
"This is a tool that reads an English text out loud. It takes an input named `text` which should contain the "
"text to read (in English) and returns a waveform object containing the sound."
)
A : str = "text_reader"
A : Optional[Any] = SpeechTaProcessor
A : Any = SpeechTaForTextToSpeech
A : Optional[Any] = SpeechTaHifiGan
A : str = ["text"]
A : Union[str, Any] = ["audio"]
def snake_case__ ( self : List[Any] ):
if self.post_processor is None:
__snake_case : Tuple = """microsoft/speecht5_hifigan"""
super().setup()
def snake_case__ ( self : Any , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Tuple=None ):
__snake_case : str = self.pre_processor(text=_lowerCAmelCase , return_tensors="""pt""" , truncation=_lowerCAmelCase )
if speaker_embeddings is None:
if not is_datasets_available():
raise ImportError("""Datasets needs to be installed if not passing speaker embeddings.""" )
__snake_case : List[Any] = load_dataset("""Matthijs/cmu-arctic-xvectors""" , split="""validation""" )
__snake_case : str = torch.tensor(embeddings_dataset[73_05]["""xvector"""] ).unsqueeze(0 )
return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings}
def snake_case__ ( self : List[Any] , _lowerCAmelCase : Dict ):
with torch.no_grad():
return self.model.generate_speech(**_lowerCAmelCase )
def snake_case__ ( self : Union[str, Any] , _lowerCAmelCase : int ):
with torch.no_grad():
return self.post_processor(_lowerCAmelCase ).cpu().detach()
| 20 | 0 |
import warnings
from typing import List
from unittest.mock import Mock
import torch
from torch.utils.data import DataLoader, IterableDataset, TensorDataset
from accelerate.accelerator import Accelerator
from accelerate.utils.dataclasses import DistributedType
class SCREAMING_SNAKE_CASE__ ( IterableDataset ):
def __init__( self : Tuple , _lowerCAmelCase : int ):
__snake_case : int = data
def __iter__( self : List[str] ):
for element in self.data:
yield element
def __lowerCAmelCase ( __SCREAMING_SNAKE_CASE : Union[str, Any]=True ):
'''simple docstring'''
__snake_case : int = Accelerator(even_batches=_A )
assert accelerator.num_processes == 2, "this script expects that two GPUs are available"
return accelerator
def __lowerCAmelCase ( __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : str = False ):
'''simple docstring'''
if iterable:
__snake_case : Optional[int] = DummyIterableDataset(torch.as_tensor(range(_A ) ) )
else:
__snake_case : Tuple = TensorDataset(torch.as_tensor(range(_A ) ) )
__snake_case : List[Any] = DataLoader(_A , batch_size=_A )
__snake_case : int = accelerator.prepare(_A )
return dl
def __lowerCAmelCase ( __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Union[str, Any] , ):
'''simple docstring'''
__snake_case : Tuple = create_dataloader(accelerator=_A , dataset_size=_A , batch_size=_A )
__snake_case : List[str] = [len(batch[0] ) for batch in dl]
if accelerator.process_index == 0:
assert batch_sizes == process_0_expected_batch_sizes
elif accelerator.process_index == 1:
assert batch_sizes == process_1_expected_batch_sizes
def __lowerCAmelCase ( ):
'''simple docstring'''
__snake_case : Tuple = create_accelerator()
# without padding, we would expect a different number of batches
verify_dataloader_batch_sizes(
_A , dataset_size=3 , batch_size=1 , process_0_expected_batch_sizes=[1, 1] , process_1_expected_batch_sizes=[1, 1] , )
# without padding, we would expect the same number of batches, but different sizes
verify_dataloader_batch_sizes(
_A , dataset_size=7 , batch_size=2 , process_0_expected_batch_sizes=[2, 2] , process_1_expected_batch_sizes=[2, 2] , )
def __lowerCAmelCase ( ):
'''simple docstring'''
__snake_case : List[str] = create_accelerator(even_batches=_A )
verify_dataloader_batch_sizes(
_A , dataset_size=3 , batch_size=1 , process_0_expected_batch_sizes=[1, 1] , process_1_expected_batch_sizes=[1] , )
verify_dataloader_batch_sizes(
_A , dataset_size=7 , batch_size=2 , process_0_expected_batch_sizes=[2, 2] , process_1_expected_batch_sizes=[2, 1] , )
def __lowerCAmelCase ( ):
'''simple docstring'''
__snake_case : List[Any] = create_accelerator(even_batches=_A )
__snake_case : str = torch.nn.Linear(1 , 1 )
__snake_case : Any = accelerator.prepare(_A )
__snake_case : List[Any] = create_dataloader(_A , dataset_size=3 , batch_size=1 )
__snake_case : List[Any] = []
with accelerator.join_uneven_inputs([ddp_model] ):
for batch_idx, batch in enumerate(_A ):
__snake_case : List[Any] = ddp_model(batch[0].float() )
__snake_case : Optional[int] = output.sum()
loss.backward()
batch_idxs.append(_A )
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
assert batch_idxs == [0, 1]
elif accelerator.process_index == 1:
assert batch_idxs == [0]
def __lowerCAmelCase ( __SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
with warnings.catch_warnings(record=_A ) as w:
with accelerator.join_uneven_inputs([Mock()] ):
pass
assert issubclass(w[-1].category , _A )
assert "only supported for multi-GPU" in str(w[-1].message )
def __lowerCAmelCase ( ):
'''simple docstring'''
__snake_case : Optional[Any] = True
__snake_case : List[str] = False
__snake_case : Tuple = create_accelerator(even_batches=_A )
__snake_case : Optional[Any] = torch.nn.Linear(1 , 1 )
__snake_case : List[Any] = accelerator.prepare(_A )
__snake_case : Optional[int] = create_dataloader(_A , dataset_size=3 , batch_size=1 )
__snake_case : Dict = create_dataloader(_A , dataset_size=3 , batch_size=1 )
with accelerator.join_uneven_inputs([ddp_model] , even_batches=_A ):
__snake_case : str = train_dl.batch_sampler.even_batches
__snake_case : Optional[Any] = valid_dl.batch_sampler.even_batches
assert train_dl_overridden_value == overridden_even_batches
assert valid_dl_overridden_value == overridden_even_batches
assert train_dl.batch_sampler.even_batches == default_even_batches
assert valid_dl.batch_sampler.even_batches == default_even_batches
def __lowerCAmelCase ( ):
'''simple docstring'''
__snake_case : int = True
__snake_case : int = False
__snake_case : Union[str, Any] = create_accelerator(even_batches=_A )
__snake_case : List[Any] = torch.nn.Linear(1 , 1 )
__snake_case : Optional[int] = accelerator.prepare(_A )
create_dataloader(_A , dataset_size=3 , batch_size=1 , iterable=_A )
__snake_case : List[str] = create_dataloader(_A , dataset_size=3 , batch_size=1 )
with warnings.catch_warnings():
warnings.filterwarnings("""ignore""" )
try:
with accelerator.join_uneven_inputs([ddp_model] , even_batches=_A ):
__snake_case : Any = batch_dl.batch_sampler.even_batches
except AttributeError:
# ensure attribute error is not raised when processing iterable dl
raise AssertionError
assert batch_dl_overridden_value == overridden_even_batches
assert batch_dl.batch_sampler.even_batches == default_even_batches
def __lowerCAmelCase ( ):
'''simple docstring'''
__snake_case : Dict = create_accelerator()
__snake_case : List[Any] = torch.nn.Linear(1 , 1 )
__snake_case : Tuple = accelerator.prepare(_A )
create_dataloader(_A , dataset_size=3 , batch_size=1 , iterable=_A )
with warnings.catch_warnings(record=_A ) as w:
with accelerator.join_uneven_inputs([ddp_model] , even_batches=_A ):
pass
assert issubclass(w[-1].category , _A )
assert "only supported for map-style datasets" in str(w[-1].message )
def __lowerCAmelCase ( ):
'''simple docstring'''
__snake_case : Optional[Any] = create_accelerator()
accelerator.print("""Test that even_batches variable ensures uniform batches across processes""" )
test_default_ensures_even_batch_sizes()
accelerator.print("""Run tests with even_batches disabled""" )
test_can_disable_even_batches()
accelerator.print("""Test joining uneven inputs""" )
test_can_join_uneven_inputs()
accelerator.print("""Test overriding even_batches when joining uneven inputs""" )
test_join_can_override_even_batches()
accelerator.print("""Test overriding even_batches for mixed dataloader types""" )
test_join_can_override_for_mixed_type_dataloaders()
accelerator.print("""Test overriding even_batches raises a warning for iterable dataloaders""" )
test_join_raises_warning_for_iterable_when_overriding_even_batches()
accelerator.print("""Test join with non DDP distributed raises warning""" )
__snake_case : Any = accelerator.state.distributed_type
__snake_case : Dict = DistributedType.FSDP
test_join_raises_warning_for_non_ddp_distributed(_A )
__snake_case : str = original_state
if __name__ == "__main__":
main()
| 357 | import math
def real_power(apparent_power: float, power_factor: float) -> float:
    """Calculate real power (W) from apparent power (VA) and power factor."""
    if (
        not isinstance(power_factor, (int, float))
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError("power_factor must be a valid float value between -1 and 1.")
    return apparent_power * power_factor


def reactive_power(apparent_power: float, power_factor: float) -> float:
    """Calculate reactive power (VAR) from apparent power (VA) and power factor."""
    if (
        not isinstance(power_factor, (int, float))
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError("power_factor must be a valid float value between -1 and 1.")
    return apparent_power * math.sqrt(1 - power_factor**2)
if __name__ == "__main__":
import doctest
doctest.testmod()
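    # Usage sketch (illustrative values): a 100 VA load at power factor 0.9.
    print(real_power(100, 0.9))  # 90.0
    print(reactive_power(100, 0.9))  # ~43.589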
| 20 | 0 |
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class SCREAMING_SNAKE_CASE__ ( SchedulerCommonTest ):
A : List[Any] = (DDPMScheduler,)
def snake_case__ ( self : str , **_lowerCAmelCase : str ):
__snake_case : str = {
"""num_train_timesteps""": 10_00,
"""beta_start""": 0.0001,
"""beta_end""": 0.02,
"""beta_schedule""": """linear""",
"""variance_type""": """fixed_small""",
"""clip_sample""": True,
}
config.update(**A__ )
return config
def snake_case__ ( self : str ):
for timesteps in [1, 5, 1_00, 10_00]:
self.check_over_configs(num_train_timesteps=A__ )
def snake_case__ ( self : Any ):
for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=A__ , beta_end=A__ )
def snake_case__ ( self : Dict ):
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=A__ )
def snake_case__ ( self : Optional[int] ):
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=A__ )
def snake_case__ ( self : Optional[int] ):
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=A__ )
def snake_case__ ( self : Tuple ):
self.check_over_configs(thresholding=A__ )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=A__ , prediction_type=A__ , sample_max_value=A__ , )
def snake_case__ ( self : Optional[Any] ):
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=A__ )
def snake_case__ ( self : int ):
for t in [0, 5_00, 9_99]:
self.check_over_forward(time_step=A__ )
def snake_case__ ( self : Dict ):
__snake_case : Union[str, Any] = self.scheduler_classes[0]
__snake_case : Union[str, Any] = self.get_scheduler_config()
__snake_case : Optional[Any] = scheduler_class(**A__ )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(4_87 ) - 0.00979 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(9_99 ) - 0.02 ) ) < 1e-5
def snake_case__ ( self : Union[str, Any] ):
__snake_case : Tuple = self.scheduler_classes[0]
__snake_case : str = self.get_scheduler_config()
__snake_case : List[Any] = scheduler_class(**A__ )
__snake_case : str = len(A__ )
__snake_case : Optional[int] = self.dummy_model()
__snake_case : Any = self.dummy_sample_deter
__snake_case : List[Any] = torch.manual_seed(0 )
for t in reversed(range(A__ ) ):
# 1. predict noise residual
__snake_case : List[str] = model(A__ , A__ )
# 2. predict previous mean of sample x_t-1
__snake_case : List[Any] = scheduler.step(A__ , A__ , A__ , generator=A__ ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
__snake_case : Optional[int] = pred_prev_sample
__snake_case : str = torch.sum(torch.abs(A__ ) )
__snake_case : List[str] = torch.mean(torch.abs(A__ ) )
assert abs(result_sum.item() - 258.9606 ) < 1e-2
assert abs(result_mean.item() - 0.3372 ) < 1e-3
def snake_case__ ( self : List[Any] ):
__snake_case : str = self.scheduler_classes[0]
__snake_case : str = self.get_scheduler_config(prediction_type="""v_prediction""" )
__snake_case : Dict = scheduler_class(**A__ )
__snake_case : Tuple = len(A__ )
__snake_case : int = self.dummy_model()
__snake_case : Union[str, Any] = self.dummy_sample_deter
__snake_case : Any = torch.manual_seed(0 )
for t in reversed(range(A__ ) ):
# 1. predict noise residual
__snake_case : List[str] = model(A__ , A__ )
# 2. predict previous mean of sample x_t-1
__snake_case : str = scheduler.step(A__ , A__ , A__ , generator=A__ ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
__snake_case : int = pred_prev_sample
__snake_case : Dict = torch.sum(torch.abs(A__ ) )
__snake_case : Optional[int] = torch.mean(torch.abs(A__ ) )
assert abs(result_sum.item() - 202.0296 ) < 1e-2
assert abs(result_mean.item() - 0.2631 ) < 1e-3
def snake_case__ ( self : str ):
__snake_case : Union[str, Any] = self.scheduler_classes[0]
__snake_case : List[str] = self.get_scheduler_config()
__snake_case : Union[str, Any] = scheduler_class(**A__ )
__snake_case : Optional[Any] = [1_00, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=A__ )
__snake_case : int = scheduler.timesteps
for i, timestep in enumerate(A__ ):
if i == len(A__ ) - 1:
__snake_case : Optional[Any] = -1
else:
__snake_case : Dict = timesteps[i + 1]
__snake_case : Union[str, Any] = scheduler.previous_timestep(A__ )
__snake_case : Tuple = prev_t.item()
self.assertEqual(A__ , A__ )
def snake_case__ ( self : Union[str, Any] ):
__snake_case : List[str] = self.scheduler_classes[0]
__snake_case : Dict = self.get_scheduler_config()
__snake_case : Optional[Any] = scheduler_class(**A__ )
__snake_case : Tuple = [1_00, 87, 50, 51, 0]
with self.assertRaises(A__ , msg="""`custom_timesteps` must be in descending order.""" ):
scheduler.set_timesteps(timesteps=A__ )
def snake_case__ ( self : Optional[int] ):
__snake_case : List[Any] = self.scheduler_classes[0]
__snake_case : Optional[int] = self.get_scheduler_config()
__snake_case : Any = scheduler_class(**A__ )
__snake_case : Dict = [1_00, 87, 50, 1, 0]
__snake_case : int = len(A__ )
with self.assertRaises(A__ , msg="""Can only pass one of `num_inference_steps` or `custom_timesteps`.""" ):
scheduler.set_timesteps(num_inference_steps=A__ , timesteps=A__ )
def snake_case__ ( self : Union[str, Any] ):
__snake_case : Dict = self.scheduler_classes[0]
__snake_case : List[str] = self.get_scheduler_config()
__snake_case : Any = scheduler_class(**A__ )
__snake_case : Tuple = [scheduler.config.num_train_timesteps]
with self.assertRaises(
            A__ , msg=f"""`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}""" , ):
scheduler.set_timesteps(timesteps=A__ )
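# Hedged sketch (not the scheduler's actual code): the _get_variance values
# asserted above follow the DDPM posterior variance
#   var(t) = (1 - alpha_bar_{t-1}) / (1 - alpha_bar_t) * beta_t
# over a linear beta schedule; with the default 1000-step config this yields
# ~0.00979 at t=487 and ~0.02 at t=999, matching the test constants.
import torch

def ddpm_posterior_variance(t, num_train_timesteps=1000, beta_start=1e-4, beta_end=0.02):
    betas = torch.linspace(beta_start, beta_end, num_train_timesteps)
    alphas_cumprod = torch.cumprod(1.0 - betas, dim=0)
    alpha_bar_prev = alphas_cumprod[t - 1] if t > 0 else torch.tensor(1.0)
    return (1 - alpha_bar_prev) / (1 - alphas_cumprod[t]) * betas[t]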
| 358 | from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
FPaRecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
get_ccl_version,
is_abit_bnb_available,
is_aim_available,
is_bfaa_available,
is_bnb_available,
is_botoa_available,
is_ccl_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_fpa_available,
is_ipex_available,
is_megatron_lm_available,
is_mlflow_available,
is_mps_available,
is_npu_available,
is_rich_available,
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
convert_outputs_to_fpaa,
convert_to_fpaa,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
if is_deepspeed_available():
from .deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
HfDeepSpeedConfig,
)
from .bnb import has_abit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
    prepare_sagemaker_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
TaTrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers
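# Hedged sketch of the env-parsing helpers imported above (illustrative only;
# the real accelerate implementations may accept different spellings):
import os

def parse_flag_from_env(key, default=False):
    value = os.environ.get(key, str(default))
    return value.upper() in {"1", "TRUE", "YES", "ON"}

def get_int_from_env(env_keys, default):
    # return the first non-negative integer found among the given env vars
    for key in env_keys:
        value = int(os.environ.get(key, -1))
        if value >= 0:
            return value
    return default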
| 20 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
    "configuration_gpt_bigcode": ["GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTBigCodeConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_bigcode"] = [
"GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST",
"GPTBigCodeForSequenceClassification",
"GPTBigCodeForTokenClassification",
"GPTBigCodeForCausalLM",
"GPTBigCodeModel",
"GPTBigCodePreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_bigcode import (
GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTBigCodeForCausalLM,
GPTBigCodeForSequenceClassification,
GPTBigCodeForTokenClassification,
GPTBigCodeModel,
GPTBigCodePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
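# Hedged sketch of the lazy-import pattern used above (illustrative, not the
# actual transformers._LazyModule): attribute access imports the submodule on
# demand, so importing the package itself stays cheap.
import importlib
from types import ModuleType

class LazyModuleSketch(ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._class_to_module = {
            cls: mod for mod, classes in import_structure.items() for cls in classes
        }

    def __getattr__(self, attr):
        # import the owning submodule lazily, then resolve the attribute from it
        module = importlib.import_module("." + self._class_to_module[attr], self.__name__)
        return getattr(module, attr)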
| 359 | from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_blenderbot_small": [
"BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP",
"BlenderbotSmallConfig",
"BlenderbotSmallOnnxConfig",
],
"tokenization_blenderbot_small": ["BlenderbotSmallTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ["BlenderbotSmallTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blenderbot_small"] = [
"BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST",
"BlenderbotSmallForCausalLM",
"BlenderbotSmallForConditionalGeneration",
"BlenderbotSmallModel",
"BlenderbotSmallPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_blenderbot_small"] = [
"TFBlenderbotSmallForConditionalGeneration",
"TFBlenderbotSmallModel",
"TFBlenderbotSmallPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_blenderbot_small"] = [
"FlaxBlenderbotSmallForConditionalGeneration",
"FlaxBlenderbotSmallModel",
"FlaxBlenderbotSmallPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotSmallConfig,
BlenderbotSmallOnnxConfig,
)
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_small_fast import BlenderbotSmallTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotSmallForCausalLM,
BlenderbotSmallForConditionalGeneration,
BlenderbotSmallModel,
BlenderbotSmallPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot_small import (
TFBlenderbotSmallForConditionalGeneration,
TFBlenderbotSmallModel,
TFBlenderbotSmallPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot_small import (
FlaxBlenderbotSmallForConditionalGeneration,
FlaxBlenderbotSmallModel,
FlaxBlenderbotSmallPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 20 | 0 |
import argparse
import copy
def generate_neighbours(path):
    '''simple docstring'''
    dict_of_neighbours = {}
    with open(path) as f:
        for line in f:
            if line.split()[0] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[1], line.split()[2]])
                dict_of_neighbours[line.split()[0]] = _list
            else:
                dict_of_neighbours[line.split()[0]].append(
                    [line.split()[1], line.split()[2]])
            if line.split()[1] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[0], line.split()[2]])
                dict_of_neighbours[line.split()[1]] = _list
            else:
                dict_of_neighbours[line.split()[1]].append(
                    [line.split()[0], line.split()[2]])
    return dict_of_neighbours
def generate_first_solution(path, dict_of_neighbours):
    '''simple docstring'''
    with open(path) as f:
        start_node = f.read(1)
    end_node = start_node
    first_solution = []
    visiting = start_node
    distance_of_first_solution = 0
    while visiting not in first_solution:
        minim = 10000
        for k in dict_of_neighbours[visiting]:
            if int(k[1]) < int(minim) and k[0] not in first_solution:
                minim = k[1]
                best_node = k[0]
        first_solution.append(visiting)
        distance_of_first_solution = distance_of_first_solution + int(minim)
        visiting = best_node
    first_solution.append(end_node)
    position = 0
    for k in dict_of_neighbours[first_solution[-2]]:
        if k[0] == start_node:
            break
        position += 1
    distance_of_first_solution = (
        distance_of_first_solution
        + int(dict_of_neighbours[first_solution[-2]][position][1])
        - 10000
    )
    return first_solution, distance_of_first_solution
def find_neighborhood(solution, dict_of_neighbours):
    '''simple docstring'''
    neighborhood_of_solution = []
    for n in solution[1:-1]:
        idx1 = solution.index(n)
        for kn in solution[1:-1]:
            idx2 = solution.index(kn)
            if n == kn:
                continue
            _tmp = copy.deepcopy(solution)
            _tmp[idx1] = kn
            _tmp[idx2] = n
            distance = 0
            for k in _tmp[:-1]:
                next_node = _tmp[_tmp.index(k) + 1]
                for i in dict_of_neighbours[k]:
                    if i[0] == next_node:
                        distance = distance + int(i[1])
            _tmp.append(distance)
            if _tmp not in neighborhood_of_solution:
                neighborhood_of_solution.append(_tmp)
    index_of_last_item_in_the_list = len(neighborhood_of_solution[0]) - 1
    neighborhood_of_solution.sort(key=lambda x: x[index_of_last_item_in_the_list])
    return neighborhood_of_solution
def tabu_search(first_solution, distance_of_first_solution, dict_of_neighbours, iters, size):
    '''simple docstring'''
    count = 1
    solution = first_solution
    tabu_list = []
    best_cost = distance_of_first_solution
    best_solution_ever = solution
    while count <= iters:
        neighborhood = find_neighborhood(solution, dict_of_neighbours)
        index_of_best_solution = 0
        best_solution = neighborhood[index_of_best_solution]
        best_cost_index = len(best_solution) - 1
        found = False
        while not found:
            i = 0
            while i < len(best_solution):
                if best_solution[i] != solution[i]:
                    first_exchange_node = best_solution[i]
                    second_exchange_node = solution[i]
                    break
                i = i + 1
            if [first_exchange_node, second_exchange_node] not in tabu_list and [
                second_exchange_node,
                first_exchange_node,
            ] not in tabu_list:
                tabu_list.append([first_exchange_node, second_exchange_node])
                found = True
                solution = best_solution[:-1]
                cost = neighborhood[index_of_best_solution][best_cost_index]
                if cost < best_cost:
                    best_cost = cost
                    best_solution_ever = solution
            else:
                index_of_best_solution = index_of_best_solution + 1
                best_solution = neighborhood[index_of_best_solution]
        if len(tabu_list) >= size:
            tabu_list.pop(0)
        count = count + 1
    return best_solution_ever, best_cost
def main(args=None):
    '''simple docstring'''
    dict_of_neighbours = generate_neighbours(args.File)
    first_solution, distance_of_first_solution = generate_first_solution(
        args.File, dict_of_neighbours)
    best_sol, best_cost = tabu_search(
        first_solution,
        distance_of_first_solution,
        dict_of_neighbours,
        args.Iterations,
        args.Size,
    )
    print(f'''Best solution: {best_sol}, with total distance: {best_cost}.''')
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Tabu Search")
parser.add_argument(
"-f",
"--File",
type=str,
help="Path to the file containing the data",
required=True,
)
parser.add_argument(
"-i",
"--Iterations",
type=int,
help="How many iterations the algorithm should perform",
required=True,
)
parser.add_argument(
"-s", "--Size", type=int, help="Size of the tabu list", required=True
)
# Pass the arguments to main method
main(parser.parse_args())
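# Hedged usage sketch: generate_neighbours expects a whitespace-separated edge
# list, one "node_a node_b distance" triple per line; the file contents and
# invocation below are illustrative only, e.g.
#
#   a b 20
#   a c 18
#   b c 10
#
#   python tabu_search.py -f tsp_data.txt -i 100 -s 5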
| 360 | from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
is_vision_available,
)
lowercase_ = {"configuration_vit": ["VIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTConfig", "ViTOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ["ViTFeatureExtractor"]
lowercase_ = ["ViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vit"] = [
"VIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"ViTForImageClassification",
"ViTForMaskedImageModeling",
"ViTModel",
"ViTPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_vit"] = [
"TFViTForImageClassification",
"TFViTModel",
"TFViTPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_vit"] = [
"FlaxViTForImageClassification",
"FlaxViTModel",
"FlaxViTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_vit import ViTFeatureExtractor
from .image_processing_vit import ViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit import (
VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTForImageClassification,
ViTForMaskedImageModeling,
ViTModel,
ViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 20 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"configuration_biogpt": ["BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BioGptConfig"],
"tokenization_biogpt": ["BioGptTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_biogpt"] = [
"BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST",
"BioGptForCausalLM",
"BioGptForTokenClassification",
"BioGptForSequenceClassification",
"BioGptModel",
"BioGptPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
from .tokenization_biogpt import BioGptTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_biogpt import (
BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 361 | from __future__ import annotations
def comp_and_swap(array: list[int], index1: int, index2: int, direction: int) -> None:
    '''simple docstring'''
    if (direction == 1 and array[index1] > array[index2]) or (
        direction == 0 and array[index1] < array[index2]
    ):
        array[index1], array[index2] = array[index2], array[index1]
def bitonic_merge(array: list[int], low: int, length: int, direction: int) -> None:
    '''simple docstring'''
    if length > 1:
        middle = int(length / 2)
        for i in range(low, low + middle):
            comp_and_swap(array, i, i + middle, direction)
        bitonic_merge(array, low, middle, direction)
        bitonic_merge(array, low + middle, middle, direction)
def bitonic_sort(array: list[int], low: int, length: int, direction: int) -> None:
    '''simple docstring'''
    if length > 1:
        middle = int(length / 2)
        bitonic_sort(array, low, middle, 1)
        bitonic_sort(array, low + middle, middle, 0)
        bitonic_merge(array, low, length, direction)
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item.strip()) for item in user_input.split(",")]
bitonic_sort(unsorted, 0, len(unsorted), 1)
print("\nSorted array in ascending order is: ", end="")
print(*unsorted, sep=", ")
bitonic_merge(unsorted, 0, len(unsorted), 0)
print("Sorted array in descending order is: ", end="")
print(*unsorted, sep=", ")
| 20 | 0 |
"""simple docstring"""
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils_fast import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "emoji_file": "emoji.json"}
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt",
    },
    "emoji_file": {
        "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json",
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "abeja/gpt-neox-japanese-2.7b": 2048,
}
def load_vocab_and_emoji(vocab_file, emoji_file):
    '''simple docstring'''
    with open(emoji_file, "r", encoding="utf-8") as f:
        emoji = json.loads(f.read())
    vocab = collections.OrderedDict()
    raw_vocab = collections.OrderedDict()
    ids_to_tokens = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as f:
        token = f.readlines()
    token = [[t.rstrip("\n")] if (t == "," or "," not in t) else t.rstrip("\n").split(",") for t in token]
    for idx, b in enumerate(token):
        ids_to_tokens[idx] = b
        raw_vocab[",".join(b)] = idx
        for wd in b:
            vocab[wd] = idx
    return vocab, raw_vocab, ids_to_tokens, emoji
class SCREAMING_SNAKE_CASE__ ( PreTrainedTokenizer ):
A : List[Any] = VOCAB_FILES_NAMES
A : Any = PRETRAINED_VOCAB_FILES_MAP
A : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A : List[Any] = ["input_ids", "attention_mask"]
def __init__( self : Tuple , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Tuple , _lowerCAmelCase : Any="<|endoftext|>" , _lowerCAmelCase : Union[str, Any]="<|endoftext|>" , _lowerCAmelCase : str="<|startoftext|>" , _lowerCAmelCase : Union[str, Any]="<|endoftext|>" , _lowerCAmelCase : Dict=False , **_lowerCAmelCase : Tuple , ):
super().__init__(
unk_token=UpperCamelCase__ , pad_token=UpperCamelCase__ , bos_token=UpperCamelCase__ , eos_token=UpperCamelCase__ , do_clean_text=UpperCamelCase__ , **UpperCamelCase__ , )
if not os.path.isfile(UpperCamelCase__ ):
raise ValueError(
f'''Can\'t find a vocabulary file at path \'{vocab_file}\'. To load the vocabulary from a Google pretrained'''
""" model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`""" )
if not os.path.isfile(UpperCamelCase__ ):
raise ValueError(
f'''Can\'t find a emoji file at path \'{emoji_file}\'. To load the emoji information from a Google'''
""" pretrained model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`""" )
__snake_case : str = do_clean_text
__snake_case , __snake_case , __snake_case , __snake_case : Union[str, Any] = load_vocab_and_emoji(UpperCamelCase__ , UpperCamelCase__ )
__snake_case : List[Any] = SubWordJapaneseTokenizer(
vocab=self.vocab , ids_to_tokens=self.ids_to_tokens , emoji=self.emoji )
@property
def snake_case__ ( self : Any ):
return len(self.raw_vocab )
def snake_case__ ( self : Tuple ):
return dict(self.raw_vocab , **self.added_tokens_encoder )
def snake_case__ ( self : List[str] , _lowerCAmelCase : Optional[Any] ):
return self.subword_tokenizer.tokenize(UpperCamelCase__ , clean=self.do_clean_text )
def snake_case__ ( self : Any , _lowerCAmelCase : List[str] ):
return self.vocab.get(UpperCamelCase__ , self.vocab.get(self.unk_token ) )
def snake_case__ ( self : int , _lowerCAmelCase : Optional[Any] ):
return self.subword_tokenizer.convert_id_to_token(UpperCamelCase__ )
def snake_case__ ( self : Dict , _lowerCAmelCase : int ):
__snake_case : Dict = """""".join(UpperCamelCase__ ).strip()
return out_string
def snake_case__ ( self : int , _lowerCAmelCase : Dict ):
__snake_case : Optional[int] = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ ) + [self.eos_token_id] )
if len(UpperCamelCase__ ) > self.model_max_length:
__snake_case : Dict = input_ids[-self.model_max_length :]
return input_ids
def snake_case__ ( self : int , _lowerCAmelCase : str , _lowerCAmelCase : Optional[Any] = None ):
__snake_case : Tuple = 0
if os.path.isdir(UpperCamelCase__ ):
__snake_case : Any = os.path.join(
UpperCamelCase__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
__snake_case : Optional[int] = os.path.join(
UpperCamelCase__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""emoji_file"""] )
else:
__snake_case : List[Any] = (
(filename_prefix + """-""" if filename_prefix else """""") + save_directory + VOCAB_FILES_NAMES["""vocab_file"""]
)
__snake_case : List[Any] = (
(filename_prefix + """-""" if filename_prefix else """""") + save_directory + VOCAB_FILES_NAMES["""emoji_file"""]
)
with open(UpperCamelCase__ , """w""" , encoding="""utf-8""" ) as writer:
for token_index, token in self.ids_to_tokens.items():
if index != token_index:
logger.warning(
f'''Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.'''
""" Please check that the vocabulary is not corrupted!""" )
__snake_case : str = token_index
writer.write(""",""".join(UpperCamelCase__ ) + """\n""" )
index += 1
with open(UpperCamelCase__ , """w""" , encoding="""utf-8""" ) as writer:
json.dump(self.emoji , UpperCamelCase__ )
return vocab_file, emoji_file
class SCREAMING_SNAKE_CASE__ ( object ):
def __init__( self : List[Any] , _lowerCAmelCase : int , _lowerCAmelCase : Tuple , _lowerCAmelCase : str ):
__snake_case : List[str] = vocab # same as swe
__snake_case : Dict = ids_to_tokens # same as bpe
__snake_case : Optional[Any] = emoji
__snake_case : str = np.max([len(UpperCamelCase__ ) for w in self.vocab.keys()] )
__snake_case : Dict = re.compile(r"""(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)""" )
__snake_case : int = re.compile(r"""[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*""" )
__snake_case : Union[str, Any] = re.compile(r"""[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}""" )
__snake_case : Any = re.compile(
r"""([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*""" )
__snake_case : Any = re.compile(
r"""(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*""" )
__snake_case : str = re.compile(
r"""((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*""" )
__snake_case : Optional[int] = """─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿"""
__snake_case : List[Any] = """▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟"""
__snake_case : List[Any] = str.maketrans({k: """<BLOCK>""" for k in keisen + blocks} )
def __len__( self : Optional[int] ):
return len(self.ids_to_tokens )
def snake_case__ ( self : Tuple , _lowerCAmelCase : Dict ):
__snake_case : int = self.content_repattera.sub("""<URL>""" , UpperCamelCase__ )
__snake_case : str = self.content_repattera.sub("""<EMAIL>""" , UpperCamelCase__ )
__snake_case : List[str] = self.content_repattera.sub("""<TEL>""" , UpperCamelCase__ )
__snake_case : Tuple = self.content_repattera.sub("""<DATE>""" , UpperCamelCase__ )
__snake_case : Any = self.content_repattera.sub("""<DATE>""" , UpperCamelCase__ )
__snake_case : Dict = self.content_repattera.sub("""<PRICE>""" , UpperCamelCase__ )
__snake_case : Dict = content.translate(self.content_transa )
while "<BLOCK><BLOCK>" in content:
__snake_case : List[str] = content.replace("""<BLOCK><BLOCK>""" , """<BLOCK>""" )
return content
def snake_case__ ( self : List[str] , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : List[Any]=False ):
__snake_case : int = text.replace(""" """ , """<SP>""" )
__snake_case : List[str] = text.replace(""" """ , """<SP>""" )
__snake_case : List[Any] = text.replace("""\r\n""" , """<BR>""" )
__snake_case : Dict = text.replace("""\n""" , """<BR>""" )
__snake_case : List[str] = text.replace("""\r""" , """<BR>""" )
__snake_case : int = text.replace("""\t""" , """<TAB>""" )
__snake_case : str = text.replace("""—""" , """ー""" )
__snake_case : List[Any] = text.replace("""−""" , """ー""" )
for k, v in self.emoji["emoji"].items():
if k in text:
__snake_case : Tuple = text.replace(UpperCamelCase__ , UpperCamelCase__ )
if clean:
__snake_case : Dict = self.clean_text(UpperCamelCase__ )
def check_simbol(_lowerCAmelCase : List[Any] ):
__snake_case : Optional[Any] = x.encode()
if len(UpperCamelCase__ ) == 1 and len(UpperCamelCase__ ) == 2:
__snake_case : Any = (int(e[0] ) << 8) + int(e[1] )
if (
(c >= 0xc2_a1 and c <= 0xc2_bf)
or (c >= 0xc7_80 and c <= 0xc7_83)
or (c >= 0xca_b9 and c <= 0xcb_bf)
or (c >= 0xcc_80 and c <= 0xcd_a2)
):
return True
return False
def checkuae(_lowerCAmelCase : int ):
__snake_case : Optional[int] = x.encode()
if len(UpperCamelCase__ ) == 1 and len(UpperCamelCase__ ) == 3:
__snake_case : Dict = (int(e[0] ) << 16) + (int(e[1] ) << 8) + int(e[2] )
if c >= 0xe2_80_80 and c <= 0xe2_b0_7f:
return True
return False
__snake_case : Any = 0
__snake_case : int = []
while pos < len(UpperCamelCase__ ):
__snake_case : Any = min(len(UpperCamelCase__ ) , pos + self.maxlen + 1 ) if text[pos] == """<""" else pos + 3
__snake_case : int = [] # (token_id, token, pos)
for e in range(UpperCamelCase__ , UpperCamelCase__ , -1 ):
__snake_case : Optional[int] = text[pos:e]
if wd in self.vocab:
if wd[0] == "<" and len(UpperCamelCase__ ) > 2:
__snake_case : str = [(self.vocab[wd], wd, e)]
break
else:
candidates.append((self.vocab[wd], wd, e) )
if len(UpperCamelCase__ ) > 0:
# the smallest token_id is adopted
__snake_case , __snake_case , __snake_case : int = sorted(UpperCamelCase__ , key=lambda _lowerCAmelCase : x[0] )[0]
result.append(UpperCamelCase__ )
__snake_case : Optional[Any] = e
else:
__snake_case : Any = pos + 1
__snake_case : Tuple = text[pos:end]
if check_simbol(UpperCamelCase__ ):
result.append("""<KIGOU>""" )
elif checkuae(UpperCamelCase__ ):
result.append("""<U2000U2BFF>""" )
else:
for i in wd.encode("""utf-8""" ):
result.append("""<|byte%d|>""" % i )
__snake_case : List[str] = end
return result
def snake_case__ ( self : int , _lowerCAmelCase : Tuple , _lowerCAmelCase : int="\n" ):
__snake_case : List[str] = []
__snake_case : Dict = []
__snake_case : Tuple = self.ids_to_tokens[index][0]
if word[:6] == "<|byte" and word[-2:] == "|>":
byte_tokens.append(int(word[6:-2] ) )
else:
if len(UpperCamelCase__ ) > 0:
words.append(bytearray(UpperCamelCase__ ).decode("""utf-8""" , errors="""replace""" ) )
__snake_case : Union[str, Any] = []
if word[:7] == "<|emoji" and word[-2:] == "|>":
words.append(self.emoji["""emoji_inv"""][word] )
elif word == "<SP>":
words.append(""" """ )
elif word == "<BR>":
words.append(UpperCamelCase__ )
elif word == "<TAB>":
words.append("""\t""" )
elif word == "<BLOCK>":
words.append("""▀""" )
elif word == "<KIGOU>":
words.append("""ǀ""" )
elif word == "<U2000U2BFF>":
words.append("""‖""" )
else:
words.append(UpperCamelCase__ )
if len(UpperCamelCase__ ) > 0:
words.append(bytearray(UpperCamelCase__ ).decode("""utf-8""" , errors="""replace""" ) )
__snake_case : Any = """""".join(UpperCamelCase__ )
return text
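# Hedged sketch of the "<|byte%d|>" fallback used above (illustrative helper
# names): out-of-vocabulary characters are emitted one token per UTF-8 byte,
# and decoding buffers those bytes until a non-byte token flushes them.
def encode_byte_fallback(text):
    return ["<|byte%d|>" % b for b in text.encode("utf-8")]

def decode_byte_fallback(tokens):
    data = bytearray(int(t[6:-2]) for t in tokens)
    return data.decode("utf-8", errors="replace")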
| 362 | import os
import zipfile
import pytest
from datasets.utils.extract import (
BzipaExtractor,
Extractor,
GzipExtractor,
LzaExtractor,
SevenZipExtractor,
TarExtractor,
XzExtractor,
ZipExtractor,
ZstdExtractor,
)
from .utils import require_lza, require_pyazr, require_zstandard
@pytest.mark.parametrize(
"""compression_format, is_archive""" , [
("""7z""", True),
("""bz2""", False),
("""gzip""", False),
("""lz4""", False),
("""tar""", True),
("""xz""", False),
("""zip""", True),
("""zstd""", False),
] , )
def __lowerCAmelCase ( __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Optional[Any] , ):
'''simple docstring'''
__snake_case : Optional[int] = {
"""7z""": (seven_zip_file, SevenZipExtractor),
"""bz2""": (bza_file, BzipaExtractor),
"""gzip""": (gz_file, GzipExtractor),
"""lz4""": (lza_file, LzaExtractor),
"""tar""": (tar_file, TarExtractor),
"""xz""": (xz_file, XzExtractor),
"""zip""": (zip_file, ZipExtractor),
"""zstd""": (zstd_file, ZstdExtractor),
}
__snake_case , __snake_case : Tuple = input_paths_and_base_extractors[compression_format]
if input_path is None:
__snake_case : Tuple = F'''for \'{compression_format}\' compression_format, '''
if compression_format == "7z":
reason += require_pyazr.kwargs["reason"]
elif compression_format == "lz4":
reason += require_lza.kwargs["reason"]
elif compression_format == "zstd":
reason += require_zstandard.kwargs["reason"]
pytest.skip(__SCREAMING_SNAKE_CASE )
assert base_extractor.is_extractable(__SCREAMING_SNAKE_CASE )
__snake_case : List[str] = tmp_path / ("""extracted""" if is_archive else """extracted.txt""")
base_extractor.extract(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
if is_archive:
assert output_path.is_dir()
for file_path in output_path.iterdir():
assert file_path.name == text_file.name
__snake_case : List[str] = file_path.read_text(encoding="""utf-8""" )
else:
__snake_case : Optional[Any] = output_path.read_text(encoding="""utf-8""" )
__snake_case : int = text_file.read_text(encoding="""utf-8""" )
assert extracted_file_content == expected_file_content
@pytest.mark.parametrize(
"""compression_format, is_archive""" , [
("""7z""", True),
("""bz2""", False),
("""gzip""", False),
("""lz4""", False),
("""tar""", True),
("""xz""", False),
("""zip""", True),
("""zstd""", False),
] , )
def __lowerCAmelCase ( __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Union[str, Any] , ):
'''simple docstring'''
__snake_case : Union[str, Any] = {
"""7z""": seven_zip_file,
"""bz2""": bza_file,
"""gzip""": gz_file,
"""lz4""": lza_file,
"""tar""": tar_file,
"""xz""": xz_file,
"""zip""": zip_file,
"""zstd""": zstd_file,
}
__snake_case : int = input_paths[compression_format]
if input_path is None:
__snake_case : int = F'''for \'{compression_format}\' compression_format, '''
if compression_format == "7z":
reason += require_pyazr.kwargs["reason"]
elif compression_format == "lz4":
reason += require_lza.kwargs["reason"]
elif compression_format == "zstd":
reason += require_zstandard.kwargs["reason"]
pytest.skip(__SCREAMING_SNAKE_CASE )
__snake_case : Any = Extractor.infer_extractor_format(__SCREAMING_SNAKE_CASE )
assert extractor_format is not None
__snake_case : Tuple = tmp_path / ("""extracted""" if is_archive else """extracted.txt""")
Extractor.extract(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
if is_archive:
assert output_path.is_dir()
for file_path in output_path.iterdir():
assert file_path.name == text_file.name
__snake_case : Union[str, Any] = file_path.read_text(encoding="""utf-8""" )
else:
__snake_case : Union[str, Any] = output_path.read_text(encoding="""utf-8""" )
__snake_case : Optional[Any] = text_file.read_text(encoding="""utf-8""" )
assert extracted_file_content == expected_file_content
@pytest.fixture
def __lowerCAmelCase ( __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Any ):
'''simple docstring'''
import tarfile
__snake_case : List[str] = tmp_path / """data_dot_dot"""
directory.mkdir()
__snake_case : Optional[Any] = directory / """tar_file_with_dot_dot.tar"""
with tarfile.TarFile(__SCREAMING_SNAKE_CASE , """w""" ) as f:
f.add(__SCREAMING_SNAKE_CASE , arcname=os.path.join("""..""" , text_file.name ) )
return path
@pytest.fixture
def __lowerCAmelCase ( __SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
import tarfile
__snake_case : Dict = tmp_path / """data_sym_link"""
directory.mkdir()
__snake_case : Tuple = directory / """tar_file_with_sym_link.tar"""
os.symlink("""..""" , directory / """subdir""" , target_is_directory=__SCREAMING_SNAKE_CASE )
with tarfile.TarFile(__SCREAMING_SNAKE_CASE , """w""" ) as f:
f.add(str(directory / """subdir""" ) , arcname="""subdir""" ) # str required by os.readlink on Windows and Python < 3.8
return path
@pytest.mark.parametrize(
"""insecure_tar_file, error_log""" , [("""tar_file_with_dot_dot""", """illegal path"""), ("""tar_file_with_sym_link""", """Symlink""")] , )
def __lowerCAmelCase ( __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Optional[Any] ):
'''simple docstring'''
__snake_case : Any = {
"""tar_file_with_dot_dot""": tar_file_with_dot_dot,
"""tar_file_with_sym_link""": tar_file_with_sym_link,
}
__snake_case : int = insecure_tar_files[insecure_tar_file]
__snake_case : Optional[int] = tmp_path / """extracted"""
TarExtractor.extract(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
assert caplog.text
for record in caplog.records:
assert record.levelname == "ERROR"
assert error_log in record.msg
def __lowerCAmelCase ( __SCREAMING_SNAKE_CASE : Dict ):
'''simple docstring'''
# We should have less false positives than zipfile.is_zipfile
# We do that by checking only the magic number
__snake_case : Optional[Any] = tmpdir / """not_a_zip_file"""
# From: https://github.com/python/cpython/pull/5053
__snake_case : List[str] = (
b"""\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x01\x00\x00"""
b"""\x00\x02\x08\x06\x00\x00\x00\x99\x81\xb6'\x00\x00\x00\x15I"""
b"""DATx\x01\x01\n\x00\xf5\xff\x00PK\x05\x06\x00PK\x06\x06\x07"""
b"""\xac\x01N\xc6|a\r\x00\x00\x00\x00IEND\xaeB`\x82"""
)
with not_a_zip_file.open("""wb""" ) as f:
f.write(__SCREAMING_SNAKE_CASE )
assert zipfile.is_zipfile(str(__SCREAMING_SNAKE_CASE ) ) # is a false positive for `zipfile`
assert not ZipExtractor.is_extractable(__SCREAMING_SNAKE_CASE ) # but we're right
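# Hedged sketch of the magic-number idea the last test exercises (illustrative;
# the real ZipExtractor check may differ in the signatures it accepts): inspect
# only the leading bytes for the local-file-header signature, instead of
# zipfile.is_zipfile's scan for an end-of-central-directory record, which the
# crafted PNG above satisfies.
def looks_like_zip(path):
    with open(path, "rb") as f:
        return f.read(4) == b"PK\x03\x04"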
| 20 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"configuration_owlvit": [
"OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"OwlViTConfig",
"OwlViTOnnxConfig",
"OwlViTTextConfig",
"OwlViTVisionConfig",
],
"processing_owlvit": ["OwlViTProcessor"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ["OwlViTFeatureExtractor"]
lowercase_ = ["OwlViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_owlvit"] = [
"OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"OwlViTModel",
"OwlViTPreTrainedModel",
"OwlViTTextModel",
"OwlViTVisionModel",
"OwlViTForObjectDetection",
]
if TYPE_CHECKING:
from .configuration_owlvit import (
OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OwlViTConfig,
OwlViTOnnxConfig,
OwlViTTextConfig,
OwlViTVisionConfig,
)
from .processing_owlvit import OwlViTProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_owlvit import OwlViTFeatureExtractor
from .image_processing_owlvit import OwlViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_owlvit import (
OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
OwlViTForObjectDetection,
OwlViTModel,
OwlViTPreTrainedModel,
OwlViTTextModel,
OwlViTVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 363 | import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
@require_pyctcdecode
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def snake_case__ ( self : Any ):
__snake_case : Dict = """| <pad> <unk> <s> </s> a b c d e f g h i j k""".split()
__snake_case : str = dict(zip(_lowerCAmelCase , range(len(_lowerCAmelCase ) ) ) )
__snake_case : List[str] = {
"""unk_token""": """<unk>""",
"""bos_token""": """<s>""",
"""eos_token""": """</s>""",
}
__snake_case : str = {
"""feature_size""": 1,
"""padding_value""": 0.0,
"""sampling_rate""": 1_60_00,
"""return_attention_mask""": False,
"""do_normalize""": True,
}
__snake_case : Optional[Any] = tempfile.mkdtemp()
__snake_case : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
__snake_case : Any = os.path.join(self.tmpdirname , _lowerCAmelCase )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(_lowerCAmelCase ) + """\n""" )
with open(self.feature_extraction_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(_lowerCAmelCase ) + """\n""" )
# load decoder from hub
__snake_case : Optional[int] = """hf-internal-testing/ngram-beam-search-decoder"""
def snake_case__ ( self : Optional[Any] , **_lowerCAmelCase : Tuple ):
__snake_case : int = self.add_kwargs_tokens_map.copy()
kwargs.update(_lowerCAmelCase )
return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname , **_lowerCAmelCase )
def snake_case__ ( self : Union[str, Any] , **_lowerCAmelCase : Optional[int] ):
return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname , **_lowerCAmelCase )
def snake_case__ ( self : Dict , **_lowerCAmelCase : Tuple ):
return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name , **_lowerCAmelCase )
def snake_case__ ( self : List[str] ):
shutil.rmtree(self.tmpdirname )
def snake_case__ ( self : Union[str, Any] ):
__snake_case : Union[str, Any] = self.get_tokenizer()
__snake_case : Tuple = self.get_feature_extractor()
__snake_case : Dict = self.get_decoder()
__snake_case : List[str] = WavaVecaProcessorWithLM(tokenizer=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , decoder=_lowerCAmelCase )
processor.save_pretrained(self.tmpdirname )
__snake_case : Dict = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname )
# tokenizer
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , _lowerCAmelCase )
# feature extractor
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , _lowerCAmelCase )
# decoder
self.assertEqual(processor.decoder._alphabet.labels , decoder._alphabet.labels )
self.assertEqual(
processor.decoder.model_container[decoder._model_key]._unigram_set , decoder.model_container[decoder._model_key]._unigram_set , )
self.assertIsInstance(processor.decoder , _lowerCAmelCase )
def snake_case__ ( self : Tuple ):
__snake_case : Tuple = WavaVecaProcessorWithLM(
tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
processor.save_pretrained(self.tmpdirname )
# make sure that error is thrown when decoder alphabet doesn't match
__snake_case : Optional[Any] = WavaVecaProcessorWithLM.from_pretrained(
self.tmpdirname , alpha=5.0 , beta=3.0 , score_boundary=-7.0 , unk_score_offset=3 )
# decoder
self.assertEqual(processor.language_model.alpha , 5.0 )
self.assertEqual(processor.language_model.beta , 3.0 )
self.assertEqual(processor.language_model.score_boundary , -7.0 )
self.assertEqual(processor.language_model.unk_score_offset , 3 )
def snake_case__ ( self : int ):
__snake_case : Tuple = self.get_tokenizer()
# add token to trigger raise
tokenizer.add_tokens(["""xx"""] )
with self.assertRaisesRegex(_lowerCAmelCase , """include""" ):
WavaVecaProcessorWithLM(
tokenizer=_lowerCAmelCase , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
def snake_case__ ( self : Dict ):
__snake_case : int = self.get_feature_extractor()
__snake_case : str = self.get_tokenizer()
__snake_case : Dict = self.get_decoder()
__snake_case : Any = WavaVecaProcessorWithLM(tokenizer=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , decoder=_lowerCAmelCase )
__snake_case : List[Any] = floats_list((3, 10_00) )
__snake_case : Optional[Any] = feature_extractor(_lowerCAmelCase , return_tensors="""np""" )
__snake_case : Tuple = processor(_lowerCAmelCase , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def snake_case__ ( self : Optional[int] ):
__snake_case : Any = self.get_feature_extractor()
__snake_case : Union[str, Any] = self.get_tokenizer()
__snake_case : int = self.get_decoder()
__snake_case : Optional[Any] = WavaVecaProcessorWithLM(tokenizer=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , decoder=_lowerCAmelCase )
__snake_case : Optional[int] = """This is a test string"""
__snake_case : Union[str, Any] = processor(text=_lowerCAmelCase )
__snake_case : Dict = tokenizer(_lowerCAmelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def snake_case__ ( self : List[str] , _lowerCAmelCase : List[Any]=(2, 10, 16) , _lowerCAmelCase : str=77 ):
np.random.seed(_lowerCAmelCase )
return np.random.rand(*_lowerCAmelCase )
def snake_case__ ( self : Tuple ):
__snake_case : List[str] = self.get_feature_extractor()
__snake_case : List[str] = self.get_tokenizer()
__snake_case : List[str] = self.get_decoder()
__snake_case : Optional[Any] = WavaVecaProcessorWithLM(tokenizer=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , decoder=_lowerCAmelCase )
__snake_case : Optional[int] = self._get_dummy_logits(shape=(10, 16) , seed=13 )
__snake_case : int = processor.decode(_lowerCAmelCase )
__snake_case : Optional[int] = decoder.decode_beams(_lowerCAmelCase )[0]
self.assertEqual(decoded_decoder[0] , decoded_processor.text )
self.assertEqual("""</s> <s> </s>""" , decoded_processor.text )
self.assertEqual(decoded_decoder[-2] , decoded_processor.logit_score )
self.assertEqual(decoded_decoder[-1] , decoded_processor.lm_score )
@parameterized.expand([[None], ["""fork"""], ["""spawn"""]] )
def snake_case__ ( self : List[str] , _lowerCAmelCase : List[str] ):
__snake_case : int = self.get_feature_extractor()
__snake_case : Union[str, Any] = self.get_tokenizer()
__snake_case : int = self.get_decoder()
__snake_case : Dict = WavaVecaProcessorWithLM(tokenizer=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , decoder=_lowerCAmelCase )
__snake_case : int = self._get_dummy_logits()
# note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
# otherwise, the LM won't be available to the pool's sub-processes.
# manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
if pool_context is None:
__snake_case : Tuple = processor.batch_decode(_lowerCAmelCase )
else:
with get_context(_lowerCAmelCase ).Pool() as pool:
__snake_case : int = processor.batch_decode(_lowerCAmelCase , _lowerCAmelCase )
__snake_case : int = list(_lowerCAmelCase )
with get_context("""fork""" ).Pool() as p:
__snake_case : Tuple = decoder.decode_beams_batch(_lowerCAmelCase , _lowerCAmelCase )
__snake_case , __snake_case , __snake_case : List[Any] = [], [], []
for beams in decoded_beams:
texts_decoder.append(beams[0][0] )
logit_scores_decoder.append(beams[0][-2] )
lm_scores_decoder.append(beams[0][-1] )
self.assertListEqual(_lowerCAmelCase , decoded_processor.text )
self.assertListEqual(["""<s> <s> </s>""", """<s> <s> <s>"""] , decoded_processor.text )
self.assertListEqual(_lowerCAmelCase , decoded_processor.logit_score )
self.assertListEqual(_lowerCAmelCase , decoded_processor.lm_score )
def snake_case__ ( self : Optional[int] ):
__snake_case : Optional[Any] = self.get_feature_extractor()
__snake_case : int = self.get_tokenizer()
__snake_case : str = self.get_decoder()
__snake_case : Optional[Any] = WavaVecaProcessorWithLM(tokenizer=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , decoder=_lowerCAmelCase )
__snake_case : int = self._get_dummy_logits()
__snake_case : List[str] = 15
__snake_case : Optional[Any] = -20.0
__snake_case : Tuple = -4.0
__snake_case : List[Any] = processor.batch_decode(
_lowerCAmelCase , beam_width=_lowerCAmelCase , beam_prune_logp=_lowerCAmelCase , token_min_logp=_lowerCAmelCase , )
__snake_case : List[str] = decoded_processor_out.text
__snake_case : str = list(_lowerCAmelCase )
with get_context("""fork""" ).Pool() as pool:
__snake_case : Dict = decoder.decode_beams_batch(
_lowerCAmelCase , _lowerCAmelCase , beam_width=_lowerCAmelCase , beam_prune_logp=_lowerCAmelCase , token_min_logp=_lowerCAmelCase , )
__snake_case : int = [d[0][0] for d in decoded_decoder_out]
__snake_case : List[Any] = [d[0][2] for d in decoded_decoder_out]
__snake_case : List[Any] = [d[0][3] for d in decoded_decoder_out]
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
self.assertListEqual(["""</s> <s> <s>""", """<s> <s> <s>"""] , _lowerCAmelCase )
self.assertTrue(np.array_equal(_lowerCAmelCase , decoded_processor_out.logit_score ) )
self.assertTrue(np.allclose([-20.054, -18.447] , _lowerCAmelCase , atol=1e-3 ) )
self.assertTrue(np.array_equal(_lowerCAmelCase , decoded_processor_out.lm_score ) )
self.assertTrue(np.allclose([-15.554, -13.9474] , _lowerCAmelCase , atol=1e-3 ) )
def snake_case__ ( self : Any ):
__snake_case : List[Any] = self.get_feature_extractor()
__snake_case : Any = self.get_tokenizer()
__snake_case : Union[str, Any] = self.get_decoder()
__snake_case : Dict = WavaVecaProcessorWithLM(tokenizer=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , decoder=_lowerCAmelCase )
__snake_case : Any = self._get_dummy_logits()
__snake_case : Any = 2.0
__snake_case : int = 5.0
__snake_case : Optional[int] = -20.0
__snake_case : Optional[int] = True
__snake_case : Any = processor.batch_decode(
_lowerCAmelCase , alpha=_lowerCAmelCase , beta=_lowerCAmelCase , unk_score_offset=_lowerCAmelCase , lm_score_boundary=_lowerCAmelCase , )
__snake_case : str = decoded_processor_out.text
__snake_case : int = list(_lowerCAmelCase )
decoder.reset_params(
alpha=_lowerCAmelCase , beta=_lowerCAmelCase , unk_score_offset=_lowerCAmelCase , lm_score_boundary=_lowerCAmelCase , )
with get_context("""fork""" ).Pool() as pool:
__snake_case : Tuple = decoder.decode_beams_batch(
_lowerCAmelCase , _lowerCAmelCase , )
__snake_case : int = [d[0][0] for d in decoded_decoder_out]
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
self.assertListEqual(["""<s> </s> <s> </s> </s>""", """</s> </s> <s> </s> </s>"""] , _lowerCAmelCase )
__snake_case : List[str] = processor.decoder.model_container[processor.decoder._model_key]
self.assertEqual(lm_model.alpha , 2.0 )
self.assertEqual(lm_model.beta , 5.0 )
self.assertEqual(lm_model.unk_score_offset , -20.0 )
self.assertEqual(lm_model.score_boundary , _lowerCAmelCase )
def snake_case__ ( self : Dict ):
__snake_case : Optional[Any] = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
__snake_case : Union[str, Any] = processor.decoder.model_container[processor.decoder._model_key]
__snake_case : Optional[int] = Path(language_model._kenlm_model.path.decode("""utf-8""" ) ).parent.parent.absolute()
__snake_case : Union[str, Any] = os.listdir(_lowerCAmelCase )
__snake_case : List[str] = ["""alphabet.json""", """language_model"""]
downloaded_decoder_files.sort()
expected_decoder_files.sort()
# test that only decoder relevant files from
# https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
# are downloaded and none of the rest (e.g. README.md, ...)
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
def snake_case__ ( self : Optional[Any] ):
__snake_case : Union[str, Any] = snapshot_download("""hf-internal-testing/processor_with_lm""" )
__snake_case : Dict = WavaVecaProcessorWithLM.from_pretrained(_lowerCAmelCase )
__snake_case : Optional[Any] = processor.decoder.model_container[processor.decoder._model_key]
__snake_case : Optional[int] = Path(language_model._kenlm_model.path.decode("""utf-8""" ) ).parent.parent.absolute()
__snake_case : List[str] = os.listdir(_lowerCAmelCase )
__snake_case : List[Any] = os.listdir(_lowerCAmelCase )
local_decoder_files.sort()
expected_decoder_files.sort()
# test that both decoder form hub and local files in cache are the same
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
def snake_case__ ( self : Optional[Any] ):
__snake_case : Optional[int] = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
__snake_case : str = AutoProcessor.from_pretrained("""hf-internal-testing/processor_with_lm""" )
__snake_case : Optional[int] = floats_list((3, 10_00) )
__snake_case : Union[str, Any] = processor_wavaveca(_lowerCAmelCase , return_tensors="""np""" )
__snake_case : Union[str, Any] = processor_auto(_lowerCAmelCase , return_tensors="""np""" )
for key in input_wavaveca.keys():
self.assertAlmostEqual(input_wavaveca[key].sum() , input_auto[key].sum() , delta=1e-2 )
__snake_case : Dict = self._get_dummy_logits()
__snake_case : List[Any] = processor_wavaveca.batch_decode(_lowerCAmelCase )
__snake_case : List[Any] = processor_auto.batch_decode(_lowerCAmelCase )
self.assertListEqual(decoded_wavaveca.text , decoded_auto.text )
def snake_case__ ( self : str ):
__snake_case : int = self.get_feature_extractor()
__snake_case : List[str] = self.get_tokenizer()
__snake_case : Optional[Any] = self.get_decoder()
__snake_case : Union[str, Any] = WavaVecaProcessorWithLM(tokenizer=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , decoder=_lowerCAmelCase )
self.assertListEqual(
processor.model_input_names , feature_extractor.model_input_names , msg="""`processor` and `feature_extractor` model input names do not match""" , )
@staticmethod
def snake_case__ ( _lowerCAmelCase : Any , _lowerCAmelCase : Tuple ):
__snake_case : Union[str, Any] = [d[key] for d in offsets]
return retrieved_list
def snake_case__ ( self : Dict ):
__snake_case : int = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
__snake_case : List[str] = self._get_dummy_logits()[0]
__snake_case : str = processor.decode(_lowerCAmelCase , output_word_offsets=_lowerCAmelCase )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue("""text""" in outputs )
self.assertTrue("""word_offsets""" in outputs )
self.assertTrue(isinstance(_lowerCAmelCase , _lowerCAmelCase ) )
self.assertEqual(""" """.join(self.get_from_offsets(outputs["""word_offsets"""] , """word""" ) ) , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""] , """word""" ) , ["""<s>""", """<s>""", """</s>"""] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""] , """start_offset""" ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""] , """end_offset""" ) , [1, 3, 5] )
def snake_case__ ( self : List[str] ):
__snake_case : Any = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
__snake_case : Optional[int] = self._get_dummy_logits()
__snake_case : int = processor.batch_decode(_lowerCAmelCase , output_word_offsets=_lowerCAmelCase )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue("""text""" in outputs )
self.assertTrue("""word_offsets""" in outputs )
self.assertTrue(isinstance(_lowerCAmelCase , _lowerCAmelCase ) )
self.assertListEqual(
[""" """.join(self.get_from_offsets(_lowerCAmelCase , """word""" ) ) for o in outputs["""word_offsets"""]] , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""][0] , """word""" ) , ["""<s>""", """<s>""", """</s>"""] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""][0] , """start_offset""" ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""][0] , """end_offset""" ) , [1, 3, 5] )
@slow
@require_torch
@require_torchaudio
def snake_case__ ( self : Optional[Any] ):
import torch
__snake_case : Optional[Any] = load_dataset("""common_voice""" , """en""" , split="""train""" , streaming=_lowerCAmelCase )
__snake_case : Any = ds.cast_column("""audio""" , datasets.Audio(sampling_rate=1_60_00 ) )
__snake_case : List[Any] = iter(_lowerCAmelCase )
__snake_case : Optional[int] = next(_lowerCAmelCase )
__snake_case : str = AutoProcessor.from_pretrained("""patrickvonplaten/wav2vec2-base-100h-with-lm""" )
__snake_case : str = WavaVecaForCTC.from_pretrained("""patrickvonplaten/wav2vec2-base-100h-with-lm""" )
# compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
__snake_case : List[str] = processor(sample["""audio"""]["""array"""] , return_tensors="""pt""" ).input_values
with torch.no_grad():
__snake_case : Dict = model(_lowerCAmelCase ).logits.cpu().numpy()
__snake_case : Any = processor.decode(logits[0] , output_word_offsets=_lowerCAmelCase )
__snake_case : Optional[Any] = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
__snake_case : Dict = [
{
"""start_time""": d["""start_offset"""] * time_offset,
"""end_time""": d["""end_offset"""] * time_offset,
"""word""": d["""word"""],
}
for d in output["""word_offsets"""]
]
__snake_case : Dict = """WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL"""
# output words
self.assertEqual(""" """.join(self.get_from_offsets(_lowerCAmelCase , """word""" ) ) , _lowerCAmelCase )
self.assertEqual(""" """.join(self.get_from_offsets(_lowerCAmelCase , """word""" ) ) , output.text )
# output times
__snake_case : Dict = torch.tensor(self.get_from_offsets(_lowerCAmelCase , """start_time""" ) )
__snake_case : Optional[Any] = torch.tensor(self.get_from_offsets(_lowerCAmelCase , """end_time""" ) )
# fmt: off
__snake_case : Optional[Any] = torch.tensor([1.4199, 1.6599, 2.2599, 3.0, 3.24, 3.5999, 3.7999, 4.0999, 4.26, 4.94, 5.28, 5.6599, 5.78, 5.94, 6.32, 6.5399, 6.6599] )
__snake_case : Optional[int] = torch.tensor([1.5399, 1.8999, 2.9, 3.16, 3.5399, 3.72, 4.0199, 4.1799, 4.76, 5.1599, 5.5599, 5.6999, 5.86, 6.1999, 6.38, 6.6199, 6.94] )
# fmt: on
self.assertTrue(torch.allclose(_lowerCAmelCase , _lowerCAmelCase , atol=0.01 ) )
self.assertTrue(torch.allclose(_lowerCAmelCase , _lowerCAmelCase , atol=0.01 ) )
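# A minimal sketch of the offset-to-seconds conversion exercised in the test
# above; 320 and 16_000 are assumed typical Wav2Vec2 values, not read from the
# checkpoint.
def _offset_to_seconds(offset: int, inputs_to_logits_ratio: int = 320, sampling_rate: int = 16_000) -> float:
    # each CTC frame spans inputs_to_logits_ratio / sampling_rate seconds (0.02 s here)
    return offset * inputs_to_logits_ratio / sampling_rate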
| 20 | 0 |
from collections import Counter
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
lowercase_ = datasets.load_iris()
lowercase_ = np.array(data["data"])
lowercase_ = np.array(data["target"])
lowercase_ = data["target_names"]
lowercase_ , lowercase_ , lowercase_ , lowercase_ = train_test_split(X, y)
def __lowerCAmelCase ( __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Optional[Any] ):
'''simple docstring'''
return np.linalg.norm(np.array(snake_case__ ) - np.array(snake_case__ ) )
def __lowerCAmelCase ( __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Any=5 ):
'''simple docstring'''
__snake_case : List[Any] = zip(snake_case__ , snake_case__ )
# List of distances of all points from the point to be classified
__snake_case : Optional[Any] = []
for data_point in data:
__snake_case : Optional[Any] = euclidean_distance(data_point[0] , snake_case__ )
distances.append((distance, data_point[1]) )
# Choosing 'k' points with the least distances.
__snake_case : Optional[int] = [i[1] for i in sorted(snake_case__ )[:k]]
# Most commonly occurring class among them
# is the class into which the point is classified
__snake_case : List[Any] = Counter(snake_case__ ).most_common(1 )[0][0]
return classes[result]
if __name__ == "__main__":
print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
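# A minimal, self-contained accuracy check for the k-NN routine above (the
# original function name was lost in renaming, so the logic is restated here);
# it would score predictions on the held-out test split from train_test_split.
def _knn_predict(train_x, train_y, point, k=5):
    # distance from every training point to the query; the k nearest vote by majority
    dists = sorted((np.linalg.norm(np.array(x) - np.array(point)), label) for x, label in zip(train_x, train_y))
    return Counter(label for _, label in dists[:k]).most_common(1)[0][0]
# accuracy = np.mean([_knn_predict(X_train, y_train, p) == t for p, t in zip(X_test, y_test)])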
| 364 | import argparse
import re
import torch
from CLAP import create_model
from transformers import AutoFeatureExtractor, ClapConfig, ClapModel
lowercase_ = {
"text_branch": "text_model",
"audio_branch": "audio_model.audio_encoder",
"attn": "attention.self",
"self.proj": "output.dense",
"attention.self_mask": "attn_mask",
"mlp.fc1": "intermediate.dense",
"mlp.fc2": "output.dense",
"norm1": "layernorm_before",
"norm2": "layernorm_after",
"bn0": "batch_norm",
}
lowercase_ = AutoFeatureExtractor.from_pretrained("laion/clap-htsat-unfused", truncation="rand_trunc")
def __lowerCAmelCase ( __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Union[str, Any]=False ):
'''simple docstring'''
__snake_case , __snake_case : str = create_model(
"""HTSAT-tiny""" , """roberta""" , __SCREAMING_SNAKE_CASE , precision="""fp32""" , device="""cuda:0""" if torch.cuda.is_available() else """cpu""" , enable_fusion=__SCREAMING_SNAKE_CASE , fusion_type="""aff_2d""" if enable_fusion else None , )
return model, model_cfg
def __lowerCAmelCase ( __SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
__snake_case : Union[str, Any] = {}
__snake_case : List[Any] = R""".*sequential.(\d+).*"""
__snake_case : Union[str, Any] = R""".*_projection.(\d+).*"""
for key, value in state_dict.items():
# check if any key needs to be modified
for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
if key_to_modify in key:
__snake_case : Optional[Any] = key.replace(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
if re.match(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
# replace sequential layers with list
__snake_case : Optional[Any] = re.match(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).group(1 )
__snake_case : Dict = key.replace(F'''sequential.{sequential_layer}.''' , F'''layers.{int(__SCREAMING_SNAKE_CASE )//3}.linear.''' )
elif re.match(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
__snake_case : str = int(re.match(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).group(1 ) )
# Because in CLAP they use `nn.Sequential`...
__snake_case : List[Any] = 1 if projecton_layer == 0 else 2
__snake_case : Tuple = key.replace(F'''_projection.{projecton_layer}.''' , F'''_projection.linear{transformers_projection_layer}.''' )
if "audio" and "qkv" in key:
# split qkv into query key and value
__snake_case : Optional[int] = value
__snake_case : Any = mixed_qkv.size(0 ) // 3
__snake_case : List[Any] = mixed_qkv[:qkv_dim]
__snake_case : Tuple = mixed_qkv[qkv_dim : qkv_dim * 2]
__snake_case : List[Any] = mixed_qkv[qkv_dim * 2 :]
__snake_case : Any = query_layer
__snake_case : Dict = key_layer
__snake_case : Optional[Any] = value_layer
else:
__snake_case : List[str] = value
return model_state_dict
def __lowerCAmelCase ( __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Optional[Any]=False ):
'''simple docstring'''
__snake_case , __snake_case : List[str] = init_clap(__SCREAMING_SNAKE_CASE , enable_fusion=__SCREAMING_SNAKE_CASE )
clap_model.eval()
__snake_case : Tuple = clap_model.state_dict()
__snake_case : Union[str, Any] = rename_state_dict(__SCREAMING_SNAKE_CASE )
__snake_case : List[Any] = ClapConfig()
__snake_case : Tuple = enable_fusion
__snake_case : Any = ClapModel(__SCREAMING_SNAKE_CASE )
# ignore the spectrogram embedding layer
model.load_state_dict(__SCREAMING_SNAKE_CASE , strict=__SCREAMING_SNAKE_CASE )
model.save_pretrained(__SCREAMING_SNAKE_CASE )
transformers_config.save_pretrained(__SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument("--enable_fusion", action="store_true", help="Whether to enable fusion or not")
lowercase_ = parser.parse_args()
convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
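# A minimal illustration of the two regex rewrites done by rename_state_dict
# above: sequential indices collapse three-to-one onto MLP layers, and
# projection indices 0/2 become linear1/linear2. The key strings are made up
# for illustration.
def _demo_rename(key: str) -> str:
    match = re.match(r".*sequential.(\d+).*", key)
    if match:
        return key.replace(f"sequential.{match.group(1)}.", f"layers.{int(match.group(1)) // 3}.linear.")
    match = re.match(r".*_projection.(\d+).*", key)
    if match:
        # nn.Sequential packs the Linear modules at indices 0 and 2
        return key.replace(f"_projection.{match.group(1)}.", f"_projection.linear{1 if int(match.group(1)) == 0 else 2}.")
    return key
# _demo_rename("audio_model.sequential.4.weight") -> "audio_model.layers.1.linear.weight"
# _demo_rename("text_projection.0.weight")        -> "text_projection.linear1.weight"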
| 20 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
lowercase_ = {
"configuration_vision_encoder_decoder": ["VisionEncoderDecoderConfig", "VisionEncoderDecoderOnnxConfig"]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ["VisionEncoderDecoderModel"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ["TFVisionEncoderDecoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ["FlaxVisionEncoderDecoderModel"]
if TYPE_CHECKING:
from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
import sys
lowercase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
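# A bare-bones analogue (not the real _LazyModule implementation) of the
# deferred-import pattern used above: attribute access triggers the submodule
# import, so importing the package stays cheap until a class is touched.
import importlib
import types
class _TinyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure  # {"submodule": ["Symbol", ...]}
    def __getattr__(self, attr):
        for submodule, symbols in self._import_structure.items():
            if attr in symbols:
                return getattr(importlib.import_module(f"{self.__name__}.{submodule}"), attr)
        raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")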
| 365 | import io
import os
import unicodedata
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = "▁"
lowercase_ = {"vocab_file": "vocab.txt", "sentencepiece_model_ckpt": "sentencepiece.bpe.model"}
lowercase_ = {
"sentencepiece_model_file": "sentencepiece.bpe.model",
"vocab_file": "vocab.txt",
}
lowercase_ = {
"vocab_file": {
"ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt",
"ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt",
},
"sentencepiece_model_file": {
"ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model",
"ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model",
},
}
lowercase_ = {
"ernie-m-base": 5_14,
"ernie-m-large": 5_14,
}
lowercase_ = {
"ernie-m-base": {"do_lower_case": False},
"ernie-m-large": {"do_lower_case": False},
}
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase ):
A : List[str] = ["input_ids"]
A : Tuple = VOCAB_FILES_NAMES
A : List[Any] = PRETRAINED_INIT_CONFIGURATION
A : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
A : Optional[Any] = RESOURCE_FILES_NAMES
def __init__( self : int , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Dict=None , _lowerCAmelCase : Dict=False , _lowerCAmelCase : List[Any]="utf8" , _lowerCAmelCase : Optional[Any]="[UNK]" , _lowerCAmelCase : Optional[int]="[SEP]" , _lowerCAmelCase : List[str]="[PAD]" , _lowerCAmelCase : Dict="[CLS]" , _lowerCAmelCase : List[Any]="[MASK]" , _lowerCAmelCase : Optional[Dict[str, Any]] = None , **_lowerCAmelCase : Any , ):
        # The mask token behaves like a normal word, i.e. it includes the space before it
        # and is kept in the raw text, so there should be a match in a non-normalized sentence.
__snake_case : List[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=_lowerCAmelCase , unk_token=_lowerCAmelCase , sep_token=_lowerCAmelCase , pad_token=_lowerCAmelCase , cls_token=_lowerCAmelCase , mask_token=_lowerCAmelCase , vocab_file=_lowerCAmelCase , encoding=_lowerCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **_lowerCAmelCase , )
__snake_case : List[Any] = do_lower_case
__snake_case : Any = sentencepiece_model_ckpt
__snake_case : int = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(_lowerCAmelCase )
        # to mimic the behaviour of paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer
if vocab_file is not None:
__snake_case : int = self.load_vocab(filepath=_lowerCAmelCase )
else:
__snake_case : Tuple = {self.sp_model.id_to_piece(_lowerCAmelCase ): id for id in range(self.sp_model.get_piece_size() )}
__snake_case : str = {v: k for k, v in self.vocab.items()}
def snake_case__ ( self : List[Any] , _lowerCAmelCase : List[Any] ):
if text is None:
return None
__snake_case : List[Any] = self.tokenize(_lowerCAmelCase )
__snake_case , __snake_case : Optional[Any] = """""", []
for i, ch in enumerate(_lowerCAmelCase ):
if ch in self.SP_CHAR_MAPPING:
__snake_case : Any = self.SP_CHAR_MAPPING.get(_lowerCAmelCase )
else:
__snake_case : Dict = unicodedata.normalize("""NFKC""" , _lowerCAmelCase )
if self.is_whitespace(_lowerCAmelCase ):
continue
normalized_text += ch
char_mapping.extend([i] * len(_lowerCAmelCase ) )
__snake_case , __snake_case , __snake_case : str = normalized_text, [], 0
if self.do_lower_case:
__snake_case : int = text.lower()
for token in split_tokens:
if token[:1] == "▁":
__snake_case : int = token[1:]
__snake_case : Optional[int] = text[offset:].index(_lowerCAmelCase ) + offset
__snake_case : int = start + len(_lowerCAmelCase )
token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1) )
__snake_case : str = end
return token_mapping
@property
def snake_case__ ( self : Tuple ):
return len(self.vocab )
def snake_case__ ( self : Dict ):
return dict(self.vocab , **self.added_tokens_encoder )
def __getstate__( self : int ):
__snake_case : str = self.__dict__.copy()
__snake_case : Union[str, Any] = None
return state
def __setstate__( self : Optional[Any] , _lowerCAmelCase : List[str] ):
__snake_case : Union[str, Any] = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
__snake_case : Optional[Any] = {}
__snake_case : Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.sentencepiece_model_ckpt )
def snake_case__ ( self : str , _lowerCAmelCase : Optional[int] ):
return "".join((self.SP_CHAR_MAPPING.get(_lowerCAmelCase , _lowerCAmelCase ) for c in text) )
def snake_case__ ( self : Optional[Any] , _lowerCAmelCase : str , _lowerCAmelCase : Tuple=False , _lowerCAmelCase : Optional[Any]=64 , _lowerCAmelCase : Optional[int]=0.1 ):
if self.sp_model_kwargs.get("""enable_sampling""" ) is True:
__snake_case : List[str] = True
if self.sp_model_kwargs.get("""alpha""" ) is not None:
__snake_case : Dict = self.sp_model_kwargs.get("""alpha""" )
if self.sp_model_kwargs.get("""nbest_size""" ) is not None:
__snake_case : List[Any] = self.sp_model_kwargs.get("""nbest_size""" )
if not enable_sampling:
__snake_case : str = self.sp_model.EncodeAsPieces(_lowerCAmelCase )
else:
__snake_case : Tuple = self.sp_model.SampleEncodeAsPieces(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
__snake_case : Union[str, Any] = []
for pi, piece in enumerate(_lowerCAmelCase ):
if piece == SPIECE_UNDERLINE:
if not pieces[pi + 1].startswith(_lowerCAmelCase ) and pi != 0:
new_pieces.append(_lowerCAmelCase )
continue
else:
continue
__snake_case : Optional[int] = 0
for i, chunk in enumerate(_lowerCAmelCase ):
if chunk == SPIECE_UNDERLINE:
continue
if self.is_ch_char(_lowerCAmelCase ) or self.is_punct(_lowerCAmelCase ):
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
new_pieces.append(_lowerCAmelCase )
__snake_case : Tuple = i + 1
elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit():
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
__snake_case : List[str] = i
elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit():
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
__snake_case : Tuple = i
if len(_lowerCAmelCase ) > lst_i:
new_pieces.append(piece[lst_i:] )
return new_pieces
def snake_case__ ( self : Union[str, Any] , _lowerCAmelCase : int ):
__snake_case : int = """""".join(_lowerCAmelCase ).replace(_lowerCAmelCase , """ """ ).strip()
return out_string
def snake_case__ ( self : Dict , _lowerCAmelCase : Tuple ):
__snake_case : int = self.convert_ids_to_tokens(_lowerCAmelCase )
__snake_case : Any = """""".join(_lowerCAmelCase ).replace(_lowerCAmelCase , """ """ ).strip()
return out_string
def snake_case__ ( self : Dict , _lowerCAmelCase : Tuple ):
return self.vocab.get(_lowerCAmelCase , self.vocab.get(self.unk_token ) )
def snake_case__ ( self : Union[str, Any] , _lowerCAmelCase : Dict ):
return self.reverse_vocab.get(_lowerCAmelCase , self.unk_token )
def snake_case__ ( self : Any , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Union[str, Any]=None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
__snake_case : Union[str, Any] = [self.cls_token_id]
__snake_case : Optional[Any] = [self.sep_token_id]
return _cls + token_ids_a + _sep + _sep + token_ids_a + _sep
def snake_case__ ( self : List[str] , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Optional[Any]=None ):
if offset_mapping_a is None:
return [(0, 0)] + offset_mapping_a + [(0, 0)]
return [(0, 0)] + offset_mapping_a + [(0, 0), (0, 0)] + offset_mapping_a + [(0, 0)]
def snake_case__ ( self : Optional[Any] , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Optional[Any]=None , _lowerCAmelCase : Optional[Any]=False ):
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
"""You should not supply a second sequence if the provided sequence of """
"""ids is already formatted with special tokens for the model.""" )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(_lowerCAmelCase )) + [1, 1] + ([0] * len(_lowerCAmelCase )) + [1]
return [1] + ([0] * len(_lowerCAmelCase )) + [1]
def snake_case__ ( self : Optional[int] , _lowerCAmelCase : List[int] , _lowerCAmelCase : Optional[List[int]] = None ):
# called when `add_special_tokens` is True, so align with `build_inputs_with_special_tokens` method
if token_ids_a is None:
# [CLS] X [SEP]
return (len(_lowerCAmelCase ) + 2) * [0]
# [CLS] A [SEP] [SEP] B [SEP]
return [0] * (len(_lowerCAmelCase ) + 1) + [1] * (len(_lowerCAmelCase ) + 3)
def snake_case__ ( self : Tuple , _lowerCAmelCase : List[str] ):
if "\u4e00" <= char <= "\u9fff":
return True
return False
def snake_case__ ( self : List[str] , _lowerCAmelCase : Any ):
if ("a" <= char <= "z") or ("A" <= char <= "Z"):
return True
return False
def snake_case__ ( self : int , _lowerCAmelCase : List[Any] ):
if char in ",;:.?!~,;:。?!《》【】":
return True
return False
def snake_case__ ( self : str , _lowerCAmelCase : Optional[Any] ):
if char == " " or char == "\t" or char == "\n" or char == "\r":
return True
if len(_lowerCAmelCase ) == 1:
__snake_case : Dict = unicodedata.category(_lowerCAmelCase )
if cat == "Zs":
return True
return False
def snake_case__ ( self : str , _lowerCAmelCase : List[Any] ):
__snake_case : Dict = {}
with io.open(_lowerCAmelCase , """r""" , encoding="""utf-8""" ) as f:
for index, line in enumerate(_lowerCAmelCase ):
__snake_case : Tuple = line.rstrip("""\n""" )
__snake_case : List[str] = int(_lowerCAmelCase )
return token_to_idx
def snake_case__ ( self : Union[str, Any] , _lowerCAmelCase : str , _lowerCAmelCase : Optional[str] = None ):
__snake_case : Optional[int] = 0
if os.path.isdir(_lowerCAmelCase ):
__snake_case : int = os.path.join(
_lowerCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
else:
__snake_case : Optional[Any] = (filename_prefix + """-""" if filename_prefix else """""") + save_directory
with open(_lowerCAmelCase , """w""" , encoding="""utf-8""" ) as writer:
for token, token_index in sorted(self.vocab.items() , key=lambda _lowerCAmelCase : kv[1] ):
if index != token_index:
logger.warning(
f'''Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.'''
""" Please check that the vocabulary is not corrupted!""" )
__snake_case : Union[str, Any] = token_index
writer.write(token + """\n""" )
index += 1
__snake_case : List[Any] = os.path.join(_lowerCAmelCase , """sentencepiece.bpe.model""" )
with open(_lowerCAmelCase , """wb""" ) as fi:
__snake_case : List[Any] = self.sp_model.serialized_model_proto()
fi.write(_lowerCAmelCase )
return (vocab_file,)
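# The special-token layouts produced by the methods above, shown concretely for
# ERNIE-M: a single sequence becomes [CLS] A [SEP]; a pair uses a doubled
# separator, [CLS] A [SEP] [SEP] B [SEP], with token_type_ids of 0 for
# [CLS] + A and 1 for the remaining [SEP] [SEP] B [SEP]. The id values below
# are placeholders, not the real vocabulary ids.
def _demo_pair_layout(ids_a, ids_b, cls_id=0, sep_id=2):
    return [cls_id] + ids_a + [sep_id, sep_id] + ids_b + [sep_id]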
| 20 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
lowercase_ = {
"configuration_roc_bert": ["ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoCBertConfig"],
"tokenization_roc_bert": ["RoCBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
"ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"RoCBertForCausalLM",
"RoCBertForMaskedLM",
"RoCBertForMultipleChoice",
"RoCBertForPreTraining",
"RoCBertForQuestionAnswering",
"RoCBertForSequenceClassification",
"RoCBertForTokenClassification",
"RoCBertLayer",
"RoCBertModel",
"RoCBertPreTrainedModel",
"load_tf_weights_in_roc_bert",
]
if TYPE_CHECKING:
from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
from .tokenization_roc_bert import RoCBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roc_bert import (
ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RoCBertForCausalLM,
RoCBertForMaskedLM,
RoCBertForMultipleChoice,
RoCBertForPreTraining,
RoCBertForQuestionAnswering,
RoCBertForSequenceClassification,
RoCBertForTokenClassification,
RoCBertLayer,
RoCBertModel,
RoCBertPreTrainedModel,
load_tf_weights_in_roc_bert,
)
else:
import sys
lowercase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 366 | from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"xlm-mlm-en-2048": "https://huggingface.co/xlm-mlm-en-2048/resolve/main/config.json",
"xlm-mlm-ende-1024": "https://huggingface.co/xlm-mlm-ende-1024/resolve/main/config.json",
"xlm-mlm-enfr-1024": "https://huggingface.co/xlm-mlm-enfr-1024/resolve/main/config.json",
"xlm-mlm-enro-1024": "https://huggingface.co/xlm-mlm-enro-1024/resolve/main/config.json",
"xlm-mlm-tlm-xnli15-1024": "https://huggingface.co/xlm-mlm-tlm-xnli15-1024/resolve/main/config.json",
"xlm-mlm-xnli15-1024": "https://huggingface.co/xlm-mlm-xnli15-1024/resolve/main/config.json",
"xlm-clm-enfr-1024": "https://huggingface.co/xlm-clm-enfr-1024/resolve/main/config.json",
"xlm-clm-ende-1024": "https://huggingface.co/xlm-clm-ende-1024/resolve/main/config.json",
"xlm-mlm-17-1280": "https://huggingface.co/xlm-mlm-17-1280/resolve/main/config.json",
"xlm-mlm-100-1280": "https://huggingface.co/xlm-mlm-100-1280/resolve/main/config.json",
}
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase ):
A : List[str] = "xlm"
A : List[str] = {
"hidden_size": "emb_dim",
"num_attention_heads": "n_heads",
"num_hidden_layers": "n_layers",
"n_words": "vocab_size", # For backward compatibility
}
def __init__( self : List[Any] , _lowerCAmelCase : Optional[Any]=3_01_45 , _lowerCAmelCase : Optional[Any]=20_48 , _lowerCAmelCase : Dict=12 , _lowerCAmelCase : int=16 , _lowerCAmelCase : Tuple=0.1 , _lowerCAmelCase : List[Any]=0.1 , _lowerCAmelCase : str=True , _lowerCAmelCase : List[str]=False , _lowerCAmelCase : Tuple=False , _lowerCAmelCase : Dict=False , _lowerCAmelCase : Dict=1 , _lowerCAmelCase : Tuple=True , _lowerCAmelCase : Optional[Any]=5_12 , _lowerCAmelCase : List[Any]=20_48**-0.5 , _lowerCAmelCase : List[str]=1e-12 , _lowerCAmelCase : List[Any]=0.02 , _lowerCAmelCase : List[str]=0 , _lowerCAmelCase : Optional[Any]=1 , _lowerCAmelCase : Dict=2 , _lowerCAmelCase : List[str]=3 , _lowerCAmelCase : Tuple=5 , _lowerCAmelCase : Optional[int]=True , _lowerCAmelCase : Tuple="first" , _lowerCAmelCase : List[Any]=True , _lowerCAmelCase : Dict=None , _lowerCAmelCase : List[Any]=True , _lowerCAmelCase : Any=0.1 , _lowerCAmelCase : Tuple=5 , _lowerCAmelCase : List[str]=5 , _lowerCAmelCase : Optional[Any]=0 , _lowerCAmelCase : Tuple=0 , _lowerCAmelCase : Union[str, Any]=2 , _lowerCAmelCase : Union[str, Any]=0 , **_lowerCAmelCase : Tuple , ):
__snake_case : Optional[Any] = vocab_size
__snake_case : Tuple = emb_dim
__snake_case : int = n_layers
__snake_case : List[str] = n_heads
__snake_case : Union[str, Any] = dropout
__snake_case : Optional[int] = attention_dropout
__snake_case : Optional[Any] = gelu_activation
__snake_case : Tuple = sinusoidal_embeddings
__snake_case : List[Any] = causal
__snake_case : Dict = asm
__snake_case : int = n_langs
__snake_case : str = use_lang_emb
__snake_case : Dict = layer_norm_eps
__snake_case : List[Any] = bos_index
__snake_case : Union[str, Any] = eos_index
__snake_case : Dict = pad_index
__snake_case : Any = unk_index
__snake_case : Dict = mask_index
__snake_case : Any = is_encoder
__snake_case : Dict = max_position_embeddings
__snake_case : Optional[Any] = embed_init_std
__snake_case : List[Any] = init_std
__snake_case : str = summary_type
__snake_case : Optional[Any] = summary_use_proj
__snake_case : str = summary_activation
__snake_case : Optional[int] = summary_proj_to_labels
__snake_case : Dict = summary_first_dropout
__snake_case : Dict = start_n_top
__snake_case : int = end_n_top
__snake_case : str = mask_token_id
__snake_case : int = lang_id
if "n_words" in kwargs:
__snake_case : Dict = kwargs["""n_words"""]
super().__init__(pad_token_id=_lowerCAmelCase , bos_token_id=_lowerCAmelCase , **_lowerCAmelCase )
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase ):
@property
def snake_case__ ( self : Dict ):
if self.task == "multiple-choice":
__snake_case : Optional[int] = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
__snake_case : Tuple = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
("""token_type_ids""", dynamic_axis),
] )
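# For a plain (non multiple-choice) export, the `inputs` property above
# evaluates to three tensors that share batch/sequence dynamic axes:
#   OrderedDict([
#       ("input_ids",      {0: "batch", 1: "sequence"}),
#       ("attention_mask", {0: "batch", 1: "sequence"}),
#       ("token_type_ids", {0: "batch", 1: "sequence"}),
#   ])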
| 20 | 0 |
import logging
import os
from .state import PartialState
class SCREAMING_SNAKE_CASE__ ( logging.LoggerAdapter ):
@staticmethod
def snake_case__ ( _lowerCAmelCase : Optional[Any] ):
__snake_case : str = PartialState()
return not main_process_only or (main_process_only and state.is_main_process)
def snake_case__ ( self : Any , _lowerCAmelCase : List[str] , _lowerCAmelCase : Tuple , *_lowerCAmelCase : Dict , **_lowerCAmelCase : Tuple ):
if PartialState._shared_state == {}:
raise RuntimeError(
"""You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility.""" )
__snake_case : Optional[int] = kwargs.pop("""main_process_only""" , __a )
__snake_case : Any = kwargs.pop("""in_order""" , __a )
if self.isEnabledFor(__a ):
if self._should_log(__a ):
__snake_case , __snake_case : Union[str, Any] = self.process(__a , __a )
self.logger.log(__a , __a , *__a , **__a )
elif in_order:
__snake_case : Any = PartialState()
for i in range(state.num_processes ):
if i == state.process_index:
__snake_case , __snake_case : Any = self.process(__a , __a )
self.logger.log(__a , __a , *__a , **__a )
state.wait_for_everyone()
def __lowerCAmelCase ( __UpperCAmelCase : str , __UpperCAmelCase : str = None ):
'''simple docstring'''
if log_level is None:
__snake_case : Tuple = os.environ.get("""ACCELERATE_LOG_LEVEL""" , snake_case_ )
__snake_case : Tuple = logging.getLogger(snake_case_ )
if log_level is not None:
logger.setLevel(log_level.upper() )
logger.root.setLevel(log_level.upper() )
return MultiProcessAdapter(snake_case_ , {} )
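# A sketch of the intended usage (the factory above is accelerate's
# `get_logger`; its original name was lost in renaming, and a PartialState or
# Accelerator must be constructed before logging):
#   logger = get_logger(__name__, log_level="INFO")
#   logger.info("printed once", main_process_only=True)
#   logger.info("printed on every rank, in rank order", main_process_only=False, in_order=True)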
| 367 | import math
from typing import Optional
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"facebook/encodec_24khz": "https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json",
"facebook/encodec_48khz": "https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json",
}
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase ):
A : List[Any] = "encodec"
def __init__( self : Tuple , _lowerCAmelCase : Union[str, Any]=[1.5, 3.0, 6.0, 12.0, 24.0] , _lowerCAmelCase : Tuple=2_40_00 , _lowerCAmelCase : List[Any]=1 , _lowerCAmelCase : Optional[int]=False , _lowerCAmelCase : Optional[Any]=None , _lowerCAmelCase : Union[str, Any]=None , _lowerCAmelCase : int=1_28 , _lowerCAmelCase : List[Any]=32 , _lowerCAmelCase : Optional[Any]=1 , _lowerCAmelCase : Union[str, Any]=[8, 5, 4, 2] , _lowerCAmelCase : str="weight_norm" , _lowerCAmelCase : Tuple=7 , _lowerCAmelCase : str=7 , _lowerCAmelCase : Any=3 , _lowerCAmelCase : int=2 , _lowerCAmelCase : str=True , _lowerCAmelCase : Dict="reflect" , _lowerCAmelCase : Tuple=2 , _lowerCAmelCase : List[Any]=2 , _lowerCAmelCase : int=1.0 , _lowerCAmelCase : Optional[int]=10_24 , _lowerCAmelCase : int=None , _lowerCAmelCase : List[str]=True , **_lowerCAmelCase : List[Any] , ):
__snake_case : Optional[int] = target_bandwidths
__snake_case : int = sampling_rate
__snake_case : List[Any] = audio_channels
__snake_case : str = normalize
__snake_case : Union[str, Any] = chunk_length_s
__snake_case : Union[str, Any] = overlap
__snake_case : Union[str, Any] = hidden_size
__snake_case : Union[str, Any] = num_filters
__snake_case : Optional[Any] = num_residual_layers
__snake_case : List[Any] = upsampling_ratios
__snake_case : List[str] = norm_type
__snake_case : Union[str, Any] = kernel_size
__snake_case : Optional[int] = last_kernel_size
__snake_case : Optional[Any] = residual_kernel_size
__snake_case : Dict = dilation_growth_rate
__snake_case : int = use_causal_conv
__snake_case : Tuple = pad_mode
__snake_case : str = compress
__snake_case : Optional[Any] = num_lstm_layers
__snake_case : List[Any] = trim_right_ratio
__snake_case : Any = codebook_size
__snake_case : int = codebook_dim if codebook_dim is not None else hidden_size
__snake_case : int = use_conv_shortcut
if self.norm_type not in ["weight_norm", "time_group_norm"]:
raise ValueError(
                f'''self.norm_type must be one of `"weight_norm"` or `"time_group_norm"`, got {self.norm_type}''' )
super().__init__(**_lowerCAmelCase )
@property
def snake_case__ ( self : int ):
if self.chunk_length_s is None:
return None
else:
return int(self.chunk_length_s * self.sampling_rate )
@property
def snake_case__ ( self : int ):
if self.chunk_length_s is None or self.overlap is None:
return None
else:
return max(1 , int((1.0 - self.overlap) * self.chunk_length ) )
@property
def snake_case__ ( self : Union[str, Any] ):
__snake_case : List[str] = np.prod(self.upsampling_ratios )
return math.ceil(self.sampling_rate / hop_length )
@property
def snake_case__ ( self : Tuple ):
return int(10_00 * self.target_bandwidths[-1] // (self.frame_rate * 10) )
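# A worked example with the 24 kHz defaults: upsampling_ratios=[8, 5, 4, 2]
# gives hop_length = 8 * 5 * 4 * 2 = 320, so frame_rate = ceil(24_000 / 320)
# = 75 frames/s, and the last property above yields 1000 * 24.0 // 750 = 32.
# With chunk_length_s=1.0 and overlap=0.01, chunk_length = 24_000 samples and
# chunk_stride = max(1, int(0.99 * 24_000)) = 23_760 samples.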
| 20 | 0 |
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risk.
check_min_version("4.31.0")
lowercase_ = logging.getLogger(__name__)
@dataclass
class SCREAMING_SNAKE_CASE__ :
A : str = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
A : Optional[str] = field(
default=_UpperCamelCase , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
A : Optional[str] = field(
default=_UpperCamelCase , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
A : Optional[str] = field(
default=_UpperCamelCase , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
A : bool = field(
default=_UpperCamelCase , metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."} , )
A : str = field(
default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , )
A : bool = field(
default=_UpperCamelCase , metadata={
"help": (
"Will use the token generated when running `huggingface-cli login` (necessary to use this script "
"with private models)."
)
} , )
@dataclass
class SCREAMING_SNAKE_CASE__ :
A : Optional[str] = field(default=_UpperCamelCase , metadata={"help": "The input training data file (a text file)."} )
A : Optional[str] = field(
default=_UpperCamelCase , metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."} , )
A : bool = field(
default=_UpperCamelCase , metadata={"help": "Overwrite the cached training and evaluation sets"} )
A : Optional[int] = field(
default=_UpperCamelCase , metadata={"help": "The number of processes to use for the preprocessing."} , )
A : Optional[int] = field(
default=_UpperCamelCase , metadata={
"help": (
"The maximum total input sequence length after tokenization. If passed, sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
A : bool = field(
default=_UpperCamelCase , metadata={
"help": (
"Whether to pad all samples to the maximum sentence length. "
"If False, will pad the samples dynamically when batching to the maximum length in the batch. More "
"efficient on GPU but very bad for TPU."
)
} , )
A : Optional[int] = field(
default=_UpperCamelCase , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
)
} , )
A : Optional[int] = field(
default=_UpperCamelCase , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
)
} , )
def snake_case__ ( self : Dict ):
if self.train_file is not None:
__snake_case : List[str] = self.train_file.split(""".""" )[-1]
assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
if self.validation_file is not None:
__snake_case : Union[str, Any] = self.validation_file.split(""".""" )[-1]
assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class SCREAMING_SNAKE_CASE__ :
A : PreTrainedTokenizerBase
A : Union[bool, str, PaddingStrategy] = True
A : Optional[int] = None
A : Optional[int] = None
def __call__( self : str , _lowerCAmelCase : Union[str, Any] ):
__snake_case : int = 'label' if 'label' in features[0].keys() else 'labels'
__snake_case : int = [feature.pop(_UpperCAmelCase ) for feature in features]
__snake_case : int = len(_UpperCAmelCase )
__snake_case : List[str] = len(features[0]["""input_ids"""] )
__snake_case : Tuple = [
[{k: v[i] for k, v in feature.items()} for i in range(_UpperCAmelCase )] for feature in features
]
__snake_case : List[str] = list(chain(*_UpperCAmelCase ) )
__snake_case : Optional[Any] = self.tokenizer.pad(
_UpperCAmelCase , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="""pt""" , )
# Un-flatten
__snake_case : List[Any] = {k: v.view(_UpperCAmelCase , _UpperCAmelCase , -1 ) for k, v in batch.items()}
# Add back labels
__snake_case : List[str] = torch.tensor(_UpperCAmelCase , dtype=torch.intaa )
return batch
def __lowerCAmelCase ( ):
'''simple docstring'''
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
__snake_case : List[str] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
__snake_case : Union[str, Any] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
__snake_case : Dict = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("""run_swag""" , lowerCAmelCase_ , lowerCAmelCase_ )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
__snake_case : Tuple = training_args.get_process_log_level()
logger.setLevel(lowerCAmelCase_ )
datasets.utils.logging.set_verbosity(lowerCAmelCase_ )
transformers.utils.logging.set_verbosity(lowerCAmelCase_ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
+ F'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' )
logger.info(F'''Training/evaluation parameters {training_args}''' )
# Detecting last checkpoint.
__snake_case : List[Any] = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
__snake_case : int = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
"""Use --overwrite_output_dir to overcome.""" )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.train_file is not None or data_args.validation_file is not None:
__snake_case : List[str] = {}
if data_args.train_file is not None:
__snake_case : Optional[Any] = data_args.train_file
if data_args.validation_file is not None:
__snake_case : Dict = data_args.validation_file
__snake_case : Dict = data_args.train_file.split(""".""" )[-1]
__snake_case : Any = load_dataset(
lowerCAmelCase_ , data_files=lowerCAmelCase_ , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
else:
# Downloading and loading the swag dataset from the hub.
__snake_case : Optional[int] = load_dataset(
"""swag""" , """regular""" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
__snake_case : Tuple = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
__snake_case : Any = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
__snake_case : int = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=lowerCAmelCase_ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# When using your own dataset or a different dataset from swag, you will probably need to change this.
__snake_case : Optional[Any] = [F'''ending{i}''' for i in range(4 )]
__snake_case : Optional[Any] = 'sent1'
__snake_case : List[str] = 'sent2'
if data_args.max_seq_length is None:
__snake_case : str = tokenizer.model_max_length
if max_seq_length > 1_0_2_4:
logger.warning(
"""The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value"""
""" of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can"""
""" override this default with `--block_size xxx`.""" )
__snake_case : Optional[Any] = 1_0_2_4
else:
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
F'''The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the'''
F'''model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.''' )
__snake_case : Union[str, Any] = min(data_args.max_seq_length , tokenizer.model_max_length )
# Preprocessing the datasets.
def preprocess_function(__SCREAMING_SNAKE_CASE : List[Any] ):
__snake_case : List[str] = [[context] * 4 for context in examples[context_name]]
__snake_case : List[str] = examples[question_header_name]
__snake_case : List[str] = [
[F'''{header} {examples[end][i]}''' for end in ending_names] for i, header in enumerate(lowerCAmelCase_ )
]
# Flatten out
__snake_case : List[Any] = list(chain(*lowerCAmelCase_ ) )
__snake_case : Tuple = list(chain(*lowerCAmelCase_ ) )
# Tokenize
__snake_case : Union[str, Any] = tokenizer(
lowerCAmelCase_ , lowerCAmelCase_ , truncation=lowerCAmelCase_ , max_length=lowerCAmelCase_ , padding="""max_length""" if data_args.pad_to_max_length else False , )
# Un-flatten
return {k: [v[i : i + 4] for i in range(0 , len(lowerCAmelCase_ ) , 4 )] for k, v in tokenized_examples.items()}
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError("""--do_train requires a train dataset""" )
__snake_case : List[Any] = raw_datasets['train']
if data_args.max_train_samples is not None:
__snake_case : Dict = min(len(lowerCAmelCase_ ) , data_args.max_train_samples )
__snake_case : Tuple = train_dataset.select(range(lowerCAmelCase_ ) )
with training_args.main_process_first(desc="""train dataset map pre-processing""" ):
__snake_case : int = train_dataset.map(
lowerCAmelCase_ , batched=lowerCAmelCase_ , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
if training_args.do_eval:
if "validation" not in raw_datasets:
raise ValueError("""--do_eval requires a validation dataset""" )
__snake_case : Optional[Any] = raw_datasets['validation']
if data_args.max_eval_samples is not None:
__snake_case : str = min(len(lowerCAmelCase_ ) , data_args.max_eval_samples )
__snake_case : Optional[int] = eval_dataset.select(range(lowerCAmelCase_ ) )
with training_args.main_process_first(desc="""validation dataset map pre-processing""" ):
__snake_case : str = eval_dataset.map(
lowerCAmelCase_ , batched=lowerCAmelCase_ , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
# Data collator
__snake_case : Optional[Any] = (
default_data_collator
if data_args.pad_to_max_length
else DataCollatorForMultipleChoice(tokenizer=lowerCAmelCase_ , pad_to_multiple_of=8 if training_args.fpaa else None )
)
# Metric
def compute_metrics(__SCREAMING_SNAKE_CASE : Any ):
__snake_case : Optional[Any] = eval_predictions
__snake_case : Dict = np.argmax(lowerCAmelCase_ , axis=1 )
return {"accuracy": (preds == label_ids).astype(np.floataa ).mean().item()}
# Initialize our Trainer
__snake_case : List[str] = Trainer(
model=lowerCAmelCase_ , args=lowerCAmelCase_ , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=lowerCAmelCase_ , data_collator=lowerCAmelCase_ , compute_metrics=lowerCAmelCase_ , )
# Training
if training_args.do_train:
__snake_case : List[str] = None
if training_args.resume_from_checkpoint is not None:
__snake_case : Optional[Any] = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
__snake_case : int = last_checkpoint
__snake_case : Union[str, Any] = trainer.train(resume_from_checkpoint=lowerCAmelCase_ )
trainer.save_model() # Saves the tokenizer too for easy upload
__snake_case : Optional[Any] = train_result.metrics
__snake_case : Union[str, Any] = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(lowerCAmelCase_ )
)
__snake_case : List[Any] = min(lowerCAmelCase_ , len(lowerCAmelCase_ ) )
trainer.log_metrics("""train""" , lowerCAmelCase_ )
trainer.save_metrics("""train""" , lowerCAmelCase_ )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
__snake_case : Dict = trainer.evaluate()
__snake_case : Any = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(lowerCAmelCase_ )
__snake_case : Union[str, Any] = min(lowerCAmelCase_ , len(lowerCAmelCase_ ) )
trainer.log_metrics("""eval""" , lowerCAmelCase_ )
trainer.save_metrics("""eval""" , lowerCAmelCase_ )
__snake_case : List[Any] = {
'finetuned_from': model_args.model_name_or_path,
'tasks': 'multiple-choice',
'dataset_tags': 'swag',
'dataset_args': 'regular',
'dataset': 'SWAG',
'language': 'en',
}
if training_args.push_to_hub:
trainer.push_to_hub(**lowerCAmelCase_ )
else:
trainer.create_model_card(**lowerCAmelCase_ )
def __lowerCAmelCase ( __SCREAMING_SNAKE_CASE : Tuple ):
'''simple docstring'''
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
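# Shape bookkeeping in the multiple-choice collator above, made concrete: with
# a batch of 8 examples and 4 candidate endings each, features are flattened to
# 8 * 4 = 32 sequences for tokenizer.pad, then restored with .view(8, 4, -1);
# the model therefore sees input_ids of shape (8, 4, seq_len) and labels of
# shape (8,).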
| 368 | from __future__ import annotations
def __lowerCAmelCase ( __SCREAMING_SNAKE_CASE : list , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
__snake_case : str = []
__snake_case , __snake_case : List[str] = input_list[low:mid], input_list[mid : high + 1]
while left and right:
result.append((left if left[0] <= right[0] else right).pop(0 ) )
__snake_case : List[Any] = result + left + right
return input_list
def __lowerCAmelCase ( __SCREAMING_SNAKE_CASE : list ):
'''simple docstring'''
if len(__SCREAMING_SNAKE_CASE ) <= 1:
return input_list
__snake_case : Union[str, Any] = list(__SCREAMING_SNAKE_CASE )
# iteration for two-way merging
__snake_case : Tuple = 2
while p <= len(__SCREAMING_SNAKE_CASE ):
# getting low, high and middle value for merge-sort of single list
for i in range(0 , len(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE ):
__snake_case : List[str] = i
__snake_case : str = i + p - 1
__snake_case : Optional[Any] = (low + high + 1) // 2
__snake_case : str = merge(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# final merge of last two parts
if p * 2 >= len(__SCREAMING_SNAKE_CASE ):
__snake_case : List[str] = i
__snake_case : str = merge(__SCREAMING_SNAKE_CASE , 0 , __SCREAMING_SNAKE_CASE , len(__SCREAMING_SNAKE_CASE ) - 1 )
break
p *= 2
return input_list
if __name__ == "__main__":
lowercase_ = input("Enter numbers separated by a comma:\n").strip()
if user_input == "":
lowercase_ = []
else:
lowercase_ = [int(item.strip()) for item in user_input.split(",")]
print(iter_merge_sort(unsorted))
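# How the bottom-up passes above proceed on an 8-element list: the pass width
# doubles each iteration (p = 2, 4, 8), merging sorted runs of length p // 2.
#   p=2: [5,1, 4,2, 8,3, 7,6] -> [1,5, 2,4, 3,8, 6,7]
#   p=4: -> [1,2,4,5, 3,6,7,8]
#   p=8: -> [1,2,3,4,5,6,7,8]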
| 20 | 0 |
from typing import List
from .keymap import KEYMAP, get_character
def __lowerCAmelCase ( __SCREAMING_SNAKE_CASE : Any ):
'''simple docstring'''
def decorator(__SCREAMING_SNAKE_CASE : Tuple ):
__snake_case : str = getattr(__SCREAMING_SNAKE_CASE , """handle_key""" , [] )
handle += [key]
setattr(__SCREAMING_SNAKE_CASE , """handle_key""" , __SCREAMING_SNAKE_CASE )
return func
return decorator
def __lowerCAmelCase ( *__SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
def decorator(__SCREAMING_SNAKE_CASE : Any ):
__snake_case : Dict = getattr(__SCREAMING_SNAKE_CASE , """handle_key""" , [] )
handle += keys
setattr(__SCREAMING_SNAKE_CASE , """handle_key""" , __SCREAMING_SNAKE_CASE )
return func
return decorator
class SCREAMING_SNAKE_CASE__ ( __snake_case ):
def __new__( cls : Tuple , _lowerCAmelCase : int , _lowerCAmelCase : Dict , _lowerCAmelCase : Any ):
__snake_case : Any = super().__new__(cls , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
if not hasattr(UpperCamelCase__ , """key_handler""" ):
setattr(UpperCamelCase__ , """key_handler""" , {} )
setattr(UpperCamelCase__ , """handle_input""" , KeyHandler.handle_input )
for value in attrs.values():
__snake_case : List[Any] = getattr(UpperCamelCase__ , """handle_key""" , [] )
for key in handled_keys:
__snake_case : List[str] = value
return new_cls
@staticmethod
def snake_case__ ( cls : Union[str, Any] ):
__snake_case : List[str] = get_character()
if char != KEYMAP["undefined"]:
__snake_case : int = ord(UpperCamelCase__ )
__snake_case : str = cls.key_handler.get(UpperCamelCase__ )
if handler:
__snake_case : Dict = char
return handler(cls )
else:
return None
def __lowerCAmelCase ( cls : Optional[int] ):
'''simple docstring'''
return KeyHandler(cls.__name__ , cls.__bases__ , cls.__dict__.copy() )
| 369 | import random
import sys
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.colors import ListedColormap
lowercase_ = "Usage of script: script_name <size_of_canvas:int>"
lowercase_ = [0] * 1_00 + [1] * 10
random.shuffle(choice)
def __lowerCAmelCase ( __SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
__snake_case : List[str] = [[False for i in range(__SCREAMING_SNAKE_CASE )] for j in range(__SCREAMING_SNAKE_CASE )]
return canvas
def __lowerCAmelCase ( __SCREAMING_SNAKE_CASE : list[list[bool]] ):
'''simple docstring'''
for i, row in enumerate(__SCREAMING_SNAKE_CASE ):
for j, _ in enumerate(__SCREAMING_SNAKE_CASE ):
__snake_case : int = bool(random.getrandbits(1 ) )
def __lowerCAmelCase ( __SCREAMING_SNAKE_CASE : list[list[bool]] ):
'''simple docstring'''
__snake_case : Union[str, Any] = np.array(__SCREAMING_SNAKE_CASE )
__snake_case : List[Any] = np.array(create_canvas(current_canvas.shape[0] ) )
for r, row in enumerate(__SCREAMING_SNAKE_CASE ):
for c, pt in enumerate(__SCREAMING_SNAKE_CASE ):
__snake_case : Optional[Any] = __judge_point(
__SCREAMING_SNAKE_CASE , current_canvas[r - 1 : r + 2, c - 1 : c + 2] )
__snake_case : List[str] = next_gen_canvas
del next_gen_canvas # cleaning memory as we move on.
__snake_case : list[list[bool]] = current_canvas.tolist()
return return_canvas
def __lowerCAmelCase ( __SCREAMING_SNAKE_CASE : bool , __SCREAMING_SNAKE_CASE : list[list[bool]] ):
'''simple docstring'''
__snake_case : Any = 0
__snake_case : Dict = 0
    # counting the dead and alive neighbours.
for i in neighbours:
for status in i:
if status:
alive += 1
else:
dead += 1
    # the focus point was counted among its own neighbours above; remove it.
if pt:
alive -= 1
else:
dead -= 1
    # running the rules of the game here.
__snake_case : str = pt
if pt:
if alive < 2:
__snake_case : Optional[Any] = False
elif alive == 2 or alive == 3:
__snake_case : Union[str, Any] = True
elif alive > 3:
__snake_case : Optional[int] = False
else:
if alive == 3:
__snake_case : List[Any] = True
return state
if __name__ == "__main__":
if len(sys.argv) != 2:
raise Exception(usage_doc)
lowercase_ = int(sys.argv[1])
# main working structure of this module.
lowercase_ = create_canvas(canvas_size)
seed(c)
lowercase_ , lowercase_ = plt.subplots()
fig.show()
lowercase_ = ListedColormap(["w", "k"])
try:
while True:
lowercase_ = run(c)
ax.matshow(c, cmap=cmap)
fig.canvas.draw()
ax.cla()
except KeyboardInterrupt:
# do nothing.
pass
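# Conway's rules as applied by the judge function above (pt = current cell,
# alive = live neighbours excluding the cell itself):
#   alive < 2                 -> cell dies (underpopulation)
#   alive in (2, 3) and pt    -> cell survives
#   alive > 3                 -> cell dies (overpopulation)
#   alive == 3 and not pt     -> cell becomes alive (reproduction)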
| 20 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowercase_ = {
'''configuration_clipseg''': [
'''CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''CLIPSegConfig''',
'''CLIPSegTextConfig''',
'''CLIPSegVisionConfig''',
],
'''processing_clipseg''': ['''CLIPSegProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
'''CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CLIPSegModel''',
'''CLIPSegPreTrainedModel''',
'''CLIPSegTextModel''',
'''CLIPSegVisionModel''',
'''CLIPSegForImageSegmentation''',
]
if TYPE_CHECKING:
from .configuration_clipseg import (
CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPSegConfig,
CLIPSegTextConfig,
CLIPSegVisionConfig,
)
from .processing_clipseg import CLIPSegProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clipseg import (
CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPSegForImageSegmentation,
CLIPSegModel,
CLIPSegPreTrainedModel,
CLIPSegTextModel,
CLIPSegVisionModel,
)
else:
import sys
lowercase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 370 | import inspect
import warnings
from typing import Any, Dict, Optional, Union
from packaging import version
def __lowerCAmelCase ( *__SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Optional[Union[Dict, Any]] = None , __SCREAMING_SNAKE_CASE : Any=True , __SCREAMING_SNAKE_CASE : int=2 ):
'''simple docstring'''
from .. import __version__
__snake_case : List[Any] = take_from
__snake_case : List[Any] = ()
if not isinstance(args[0] , __SCREAMING_SNAKE_CASE ):
__snake_case : str = (args,)
for attribute, version_name, message in args:
if version.parse(version.parse(__SCREAMING_SNAKE_CASE ).base_version ) >= version.parse(__SCREAMING_SNAKE_CASE ):
raise ValueError(
F'''The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers\''''
F''' version {__version__} is >= {version_name}''' )
__snake_case : Optional[Any] = None
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) and attribute in deprecated_kwargs:
values += (deprecated_kwargs.pop(__SCREAMING_SNAKE_CASE ),)
__snake_case : Optional[Any] = F'''The `{attribute}` argument is deprecated and will be removed in version {version_name}.'''
elif hasattr(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
values += (getattr(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ),)
__snake_case : Any = F'''The `{attribute}` attribute is deprecated and will be removed in version {version_name}.'''
elif deprecated_kwargs is None:
__snake_case : Tuple = F'''`{attribute}` is deprecated and will be removed in version {version_name}.'''
if warning is not None:
__snake_case : Optional[Any] = warning + """ """ if standard_warn else """"""
warnings.warn(warning + message , __SCREAMING_SNAKE_CASE , stacklevel=__SCREAMING_SNAKE_CASE )
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) and len(__SCREAMING_SNAKE_CASE ) > 0:
__snake_case : Dict = inspect.getouterframes(inspect.currentframe() )[1]
__snake_case : int = call_frame.filename
__snake_case : int = call_frame.lineno
__snake_case : List[str] = call_frame.function
__snake_case , __snake_case : List[Any] = next(iter(deprecated_kwargs.items() ) )
raise TypeError(F'''{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`''' )
if len(__SCREAMING_SNAKE_CASE ) == 0:
return
elif len(__SCREAMING_SNAKE_CASE ) == 1:
return values[0]
return values
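Assuming this helper is exported under its upstream diffusers name `deprecate`, a typical call pops a deprecated keyword out of **kwargs while emitting a FutureWarning (the version string below is illustrative):

from diffusers.utils import deprecate

kwargs = {"old_scale": 0.5}
# Each deprecation is (attribute, removal_version, message); take_from lets the
# helper pop the deprecated key out of a kwargs dict and hand its value back.
old_scale = deprecate("old_scale", "999.0.0", "Use `scale` instead.", take_from=kwargs)
assert "old_scale" not in kwargs and old_scale == 0.5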
| 20 | 0 |
def prime_sieve_eratosthenes(num: int) -> list[int]:
    '''simple docstring'''
    if num <= 0:
        raise ValueError("""Input must be a positive integer""")
    primes = [True] * (num + 1)
    p = 2
    while p * p <= num:
        if primes[p]:
            for i in range(p * p, num + 1, p):
                primes[i] = False
        p += 1
return [prime for prime in range(2 , num + 1 ) if primes[prime]]
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_num = int(input("Enter a positive integer: ").strip())
print(prime_sieve_eratosthenes(user_num))
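For reference, the sieve returns every prime up to and including the bound:

assert prime_sieve_eratosthenes(30) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]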
| 371 | import argparse
import os
from . import (
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
AlbertConfig,
BartConfig,
BertConfig,
CamembertConfig,
CTRLConfig,
DistilBertConfig,
DPRConfig,
ElectraConfig,
FlaubertConfig,
GPTaConfig,
LayoutLMConfig,
LxmertConfig,
OpenAIGPTConfig,
RobertaConfig,
TaConfig,
TFAlbertForPreTraining,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFCamembertForMaskedLM,
TFCTRLLMHeadModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
TFElectraForPreTraining,
TFFlaubertWithLMHeadModel,
TFGPTaLMHeadModel,
TFLayoutLMForMaskedLM,
TFLxmertForPreTraining,
TFLxmertVisualFeatureEncoder,
TFOpenAIGPTLMHeadModel,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForSequenceClassification,
TFTaForConditionalGeneration,
TFTransfoXLLMHeadModel,
TFWavaVecaModel,
TFXLMRobertaForMaskedLM,
TFXLMWithLMHeadModel,
TFXLNetLMHeadModel,
TransfoXLConfig,
WavaVecaConfig,
WavaVecaModel,
XLMConfig,
XLMRobertaConfig,
XLNetConfig,
is_torch_available,
load_pytorch_checkpoint_in_tfa_model,
)
from .utils import CONFIG_NAME, WEIGHTS_NAME, cached_file, logging
if is_torch_available():
import numpy as np
import torch
from . import (
AlbertForPreTraining,
BartForConditionalGeneration,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
CamembertForMaskedLM,
CTRLLMHeadModel,
DistilBertForMaskedLM,
DistilBertForQuestionAnswering,
DPRContextEncoder,
DPRQuestionEncoder,
DPRReader,
ElectraForPreTraining,
FlaubertWithLMHeadModel,
GPTaLMHeadModel,
LayoutLMForMaskedLM,
LxmertForPreTraining,
LxmertVisualFeatureEncoder,
OpenAIGPTLMHeadModel,
RobertaForMaskedLM,
RobertaForSequenceClassification,
TaForConditionalGeneration,
TransfoXLLMHeadModel,
XLMRobertaForMaskedLM,
XLMWithLMHeadModel,
XLNetLMHeadModel,
)
logging.set_verbosity_info()
lowercase_ = {
"bart": (
BartConfig,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
BartForConditionalGeneration,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
),
"bert": (
BertConfig,
TFBertForPreTraining,
BertForPreTraining,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"bert-large-uncased-whole-word-masking-finetuned-squad": (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"bert-large-cased-whole-word-masking-finetuned-squad": (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"bert-base-cased-finetuned-mrpc": (
BertConfig,
TFBertForSequenceClassification,
BertForSequenceClassification,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"dpr": (
DPRConfig,
TFDPRQuestionEncoder,
TFDPRContextEncoder,
TFDPRReader,
DPRQuestionEncoder,
DPRContextEncoder,
DPRReader,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
),
"gpt2": (
GPTaConfig,
TFGPTaLMHeadModel,
GPTaLMHeadModel,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"xlnet": (
XLNetConfig,
TFXLNetLMHeadModel,
XLNetLMHeadModel,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"xlm": (
XLMConfig,
TFXLMWithLMHeadModel,
XLMWithLMHeadModel,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"xlm-roberta": (
XLMRobertaConfig,
TFXLMRobertaForMaskedLM,
XLMRobertaForMaskedLM,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"transfo-xl": (
TransfoXLConfig,
TFTransfoXLLMHeadModel,
TransfoXLLMHeadModel,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"openai-gpt": (
OpenAIGPTConfig,
TFOpenAIGPTLMHeadModel,
OpenAIGPTLMHeadModel,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"roberta": (
RobertaConfig,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
RobertaForMaskedLM,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"layoutlm": (
LayoutLMConfig,
TFLayoutLMForMaskedLM,
LayoutLMForMaskedLM,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
),
"roberta-large-mnli": (
RobertaConfig,
TFRobertaForSequenceClassification,
RobertaForSequenceClassification,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"camembert": (
CamembertConfig,
TFCamembertForMaskedLM,
CamembertForMaskedLM,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"flaubert": (
FlaubertConfig,
TFFlaubertWithLMHeadModel,
FlaubertWithLMHeadModel,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"distilbert": (
DistilBertConfig,
TFDistilBertForMaskedLM,
DistilBertForMaskedLM,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"distilbert-base-distilled-squad": (
DistilBertConfig,
TFDistilBertForQuestionAnswering,
DistilBertForQuestionAnswering,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"lxmert": (
LxmertConfig,
TFLxmertForPreTraining,
LxmertForPreTraining,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"lxmert-visual-feature-encoder": (
LxmertConfig,
TFLxmertVisualFeatureEncoder,
LxmertVisualFeatureEncoder,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"ctrl": (
CTRLConfig,
TFCTRLLMHeadModel,
CTRLLMHeadModel,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"albert": (
AlbertConfig,
TFAlbertForPreTraining,
AlbertForPreTraining,
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"t5": (
TaConfig,
TFTaForConditionalGeneration,
TaForConditionalGeneration,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"electra": (
ElectraConfig,
TFElectraForPreTraining,
ElectraForPreTraining,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"wav2vec2": (
WavaVecaConfig,
TFWavaVecaModel,
WavaVecaModel,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
}
def __lowerCAmelCase ( __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : List[str]=False , __SCREAMING_SNAKE_CASE : List[Any]=True ):
'''simple docstring'''
if model_type not in MODEL_CLASSES:
raise ValueError(F'''Unrecognized model type, should be one of {list(MODEL_CLASSES.keys() )}.''' )
__snake_case , __snake_case , __snake_case , __snake_case : Any = MODEL_CLASSES[model_type]
# Initialise TF model
if config_file in aws_config_map:
__snake_case : int = cached_file(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , force_download=not use_cached_models )
__snake_case : Dict = config_class.from_json_file(__SCREAMING_SNAKE_CASE )
__snake_case : Tuple = True
__snake_case : Union[str, Any] = True
print(F'''Building TensorFlow model from configuration: {config}''' )
__snake_case : List[Any] = model_class(__SCREAMING_SNAKE_CASE )
# Load weights from tf checkpoint
if pytorch_checkpoint_path in aws_config_map.keys():
__snake_case : Optional[Any] = cached_file(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , force_download=not use_cached_models )
# Load PyTorch checkpoint in tf2 model:
__snake_case : List[Any] = load_pytorch_checkpoint_in_tfa_model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
if compare_with_pt_model:
__snake_case : Tuple = tf_model(tf_model.dummy_inputs , training=__SCREAMING_SNAKE_CASE ) # build the network
__snake_case : List[str] = torch.load(__SCREAMING_SNAKE_CASE , map_location="""cpu""" )
__snake_case : Any = pt_model_class.from_pretrained(
pretrained_model_name_or_path=__SCREAMING_SNAKE_CASE , config=__SCREAMING_SNAKE_CASE , state_dict=__SCREAMING_SNAKE_CASE )
with torch.no_grad():
__snake_case : Union[str, Any] = pt_model(**pt_model.dummy_inputs )
__snake_case : Any = pto[0].numpy()
__snake_case : Optional[int] = tfo[0].numpy()
__snake_case : Optional[int] = np.amax(np.abs(np_pt - np_tf ) )
print(F'''Max absolute difference between models outputs {diff}''' )
assert diff <= 2E-2, F'''Error, model absolute difference is >2e-2: {diff}'''
# Save pytorch-model
print(F'''Save TensorFlow model to {tf_dump_path}''' )
tf_model.save_weights(__SCREAMING_SNAKE_CASE , save_format="""h5""" )
def __lowerCAmelCase ( __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : List[str]=None , __SCREAMING_SNAKE_CASE : Any=None , __SCREAMING_SNAKE_CASE : Tuple=False , __SCREAMING_SNAKE_CASE : Tuple=False , __SCREAMING_SNAKE_CASE : List[Any]=False , __SCREAMING_SNAKE_CASE : Any=False , ):
'''simple docstring'''
if args_model_type is None:
__snake_case : Tuple = list(MODEL_CLASSES.keys() )
else:
__snake_case : Union[str, Any] = [args_model_type]
for j, model_type in enumerate(__SCREAMING_SNAKE_CASE , start=1 ):
print("""=""" * 1_0_0 )
print(F''' Converting model type {j}/{len(__SCREAMING_SNAKE_CASE )}: {model_type}''' )
print("""=""" * 1_0_0 )
if model_type not in MODEL_CLASSES:
raise ValueError(F'''Unrecognized model type {model_type}, should be one of {list(MODEL_CLASSES.keys() )}.''' )
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case : Optional[int] = MODEL_CLASSES[model_type]
if model_shortcut_names_or_path is None:
__snake_case : int = list(aws_model_maps.keys() )
if config_shortcut_names_or_path is None:
__snake_case : Union[str, Any] = model_shortcut_names_or_path
for i, (model_shortcut_name, config_shortcut_name) in enumerate(
zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) , start=1 ):
print("""-""" * 1_0_0 )
if "-squad" in model_shortcut_name or "-mrpc" in model_shortcut_name or "-mnli" in model_shortcut_name:
if not only_convert_finetuned_models:
print(F''' Skipping finetuned checkpoint {model_shortcut_name}''' )
continue
__snake_case : List[Any] = model_shortcut_name
elif only_convert_finetuned_models:
print(F''' Skipping not finetuned checkpoint {model_shortcut_name}''' )
continue
print(
F''' Converting checkpoint {i}/{len(__SCREAMING_SNAKE_CASE )}: {model_shortcut_name} - model_type {model_type}''' )
print("""-""" * 1_0_0 )
if config_shortcut_name in aws_config_map:
__snake_case : int = cached_file(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , force_download=not use_cached_models )
else:
__snake_case : Dict = config_shortcut_name
if model_shortcut_name in aws_model_maps:
__snake_case : Union[str, Any] = cached_file(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , force_download=not use_cached_models )
else:
__snake_case : List[Any] = model_shortcut_name
if os.path.isfile(__SCREAMING_SNAKE_CASE ):
__snake_case : List[str] = """converted_model"""
convert_pt_checkpoint_to_tf(
model_type=__SCREAMING_SNAKE_CASE , pytorch_checkpoint_path=__SCREAMING_SNAKE_CASE , config_file=__SCREAMING_SNAKE_CASE , tf_dump_path=os.path.join(__SCREAMING_SNAKE_CASE , model_shortcut_name + """-tf_model.h5""" ) , compare_with_pt_model=__SCREAMING_SNAKE_CASE , )
if remove_cached_files:
os.remove(__SCREAMING_SNAKE_CASE )
os.remove(__SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_dump_path", default=None, type=str, required=True, help="Path to the output Tensorflow dump file."
)
parser.add_argument(
"--model_type",
default=None,
type=str,
help=(
F'''Model type selected in the list of {list(MODEL_CLASSES.keys())}. If not given, will download and '''
"convert all the models from AWS."
),
)
parser.add_argument(
"--pytorch_checkpoint_path",
default=None,
type=str,
help=(
"Path to the PyTorch checkpoint path or shortcut name to download from AWS. "
"If not given, will download and convert all the checkpoints from AWS."
),
)
parser.add_argument(
"--config_file",
default=None,
type=str,
help=(
"The config json file corresponding to the pre-trained model. \n"
"This specifies the model architecture. If not given and "
"--pytorch_checkpoint_path is not given or is a shortcut name "
"use the configuration associated to the shortcut name on the AWS"
),
)
parser.add_argument(
"--compare_with_pt_model", action="store_true", help="Compare Tensorflow and PyTorch model predictions."
)
parser.add_argument(
"--use_cached_models",
action="store_true",
help="Use cached models if possible instead of updating to latest checkpoint versions.",
)
parser.add_argument(
"--remove_cached_files",
action="store_true",
help="Remove pytorch models after conversion (save memory when converting in batches).",
)
parser.add_argument("--only_convert_finetuned_models", action="store_true", help="Only convert finetuned models.")
lowercase_ = parser.parse_args()
# if args.pytorch_checkpoint_path is not None:
# convert_pt_checkpoint_to_tf(args.model_type.lower(),
# args.pytorch_checkpoint_path,
# args.config_file if args.config_file is not None else args.pytorch_checkpoint_path,
# args.tf_dump_path,
# compare_with_pt_model=args.compare_with_pt_model,
# use_cached_models=args.use_cached_models)
# else:
convert_all_pt_checkpoints_to_tf(
args.model_type.lower() if args.model_type is not None else None,
args.tf_dump_path,
model_shortcut_names_or_path=[args.pytorch_checkpoint_path]
if args.pytorch_checkpoint_path is not None
else None,
config_shortcut_names_or_path=[args.config_file] if args.config_file is not None else None,
compare_with_pt_model=args.compare_with_pt_model,
use_cached_models=args.use_cached_models,
remove_cached_files=args.remove_cached_files,
only_convert_finetuned_models=args.only_convert_finetuned_models,
)
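Outside the CLI, the same conversion can be driven directly through `convert_all_pt_checkpoints_to_tf`, the name the __main__ block above calls (the function definition in this file is obfuscated); the paths below are placeholders:

convert_all_pt_checkpoints_to_tf(
    "bert",                                            # model_type
    "./bert-tf/",                                      # tf_dump_path
    model_shortcut_names_or_path=["./bert/pytorch_model.bin"],
    config_shortcut_names_or_path=["./bert/config.json"],
    compare_with_pt_model=True,
)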
| 20 | 0 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = "▁"
lowercase_ = {"vocab_file": "sentencepiece.bpe.model"}
lowercase_ = {
"vocab_file": {
"facebook/xglm-564M": "https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model",
}
}
lowercase_ = {
"facebook/xglm-564M": 20_48,
}
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase ):
A : Tuple = VOCAB_FILES_NAMES
A : Dict = PRETRAINED_VOCAB_FILES_MAP
A : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A : str = ["input_ids", "attention_mask"]
def __init__( self : str , _lowerCAmelCase : Tuple , _lowerCAmelCase : Optional[int]="<s>" , _lowerCAmelCase : int="</s>" , _lowerCAmelCase : Any="</s>" , _lowerCAmelCase : str="<s>" , _lowerCAmelCase : List[Any]="<unk>" , _lowerCAmelCase : Union[str, Any]="<pad>" , _lowerCAmelCase : Optional[Dict[str, Any]] = None , **_lowerCAmelCase : Dict , ):
__snake_case : Union[str, Any] = {} if sp_model_kwargs is None else sp_model_kwargs
# Compatibility with the original tokenizer
__snake_case : Dict = 7
__snake_case : List[Any] = [f'''<madeupword{i}>''' for i in range(self.num_madeup_words )]
__snake_case : int = kwargs.get("""additional_special_tokens""" , [] )
kwargs["additional_special_tokens"] += [
word for word in madeup_words if word not in kwargs["additional_special_tokens"]
]
super().__init__(
bos_token=_lowerCAmelCase , eos_token=_lowerCAmelCase , unk_token=_lowerCAmelCase , sep_token=_lowerCAmelCase , cls_token=_lowerCAmelCase , pad_token=_lowerCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **_lowerCAmelCase , )
__snake_case : List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(_lowerCAmelCase ) )
__snake_case : List[Any] = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
__snake_case : Optional[Any] = 1
        # Mimic fairseq token-to-id alignment for the first 4 tokens
__snake_case : Optional[int] = {"""<s>""": 0, """<pad>""": 1, """</s>""": 2, """<unk>""": 3}
__snake_case : Optional[int] = len(self.sp_model )
__snake_case : Optional[Any] = {f'''<madeupword{i}>''': sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words )}
self.fairseq_tokens_to_ids.update(_lowerCAmelCase )
__snake_case : Tuple = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self : str ):
__snake_case : int = self.__dict__.copy()
__snake_case : List[Any] = None
__snake_case : int = self.sp_model.serialized_model_proto()
return state
def __setstate__( self : Optional[Any] , _lowerCAmelCase : Tuple ):
__snake_case : Tuple = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
__snake_case : Optional[int] = {}
__snake_case : Dict = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def snake_case__ ( self : Any , _lowerCAmelCase : List[int] , _lowerCAmelCase : Optional[List[int]] = None ):
if token_ids_a is None:
return [self.sep_token_id] + token_ids_a
__snake_case : Optional[Any] = [self.sep_token_id]
return sep + token_ids_a + sep + sep + token_ids_a
def snake_case__ ( self : Dict , _lowerCAmelCase : List[int] , _lowerCAmelCase : Optional[List[int]] = None , _lowerCAmelCase : bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_lowerCAmelCase , token_ids_a=_lowerCAmelCase , already_has_special_tokens=_lowerCAmelCase )
if token_ids_a is None:
return [1] + ([0] * len(_lowerCAmelCase ))
return [1] + ([0] * len(_lowerCAmelCase )) + [1, 1] + ([0] * len(_lowerCAmelCase ))
def snake_case__ ( self : List[Any] , _lowerCAmelCase : List[int] , _lowerCAmelCase : Optional[List[int]] = None ):
__snake_case : str = [self.sep_token_id]
if token_ids_a is None:
return len(sep + token_ids_a ) * [0]
return len(sep + token_ids_a + sep + sep + token_ids_a ) * [0]
@property
def snake_case__ ( self : Union[str, Any] ):
return len(self.sp_model ) + self.fairseq_offset + self.num_madeup_words
def snake_case__ ( self : str ):
__snake_case : Tuple = {self.convert_ids_to_tokens(_lowerCAmelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def snake_case__ ( self : List[Any] , _lowerCAmelCase : str ):
return self.sp_model.encode(_lowerCAmelCase , out_type=_lowerCAmelCase )
def snake_case__ ( self : Any , _lowerCAmelCase : Tuple ):
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
__snake_case : Dict = self.sp_model.PieceToId(_lowerCAmelCase )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def snake_case__ ( self : Optional[int] , _lowerCAmelCase : Optional[int] ):
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def snake_case__ ( self : str , _lowerCAmelCase : Tuple ):
__snake_case : str = """""".join(_lowerCAmelCase ).replace(_lowerCAmelCase , """ """ ).strip()
return out_string
def snake_case__ ( self : List[str] , _lowerCAmelCase : str , _lowerCAmelCase : Optional[str] = None ):
if not os.path.isdir(_lowerCAmelCase ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
__snake_case : List[Any] = os.path.join(
_lowerCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_lowerCAmelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _lowerCAmelCase )
elif not os.path.isfile(self.vocab_file ):
with open(_lowerCAmelCase , """wb""" ) as fi:
__snake_case : Dict = self.sp_model.serialized_model_proto()
fi.write(_lowerCAmelCase )
return (out_vocab_file,)
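Assuming this class is exported as transformers' XGLMTokenizer (the vocab URLs above point at facebook/xglm-564M), basic round-tripping looks like:

from transformers import XGLMTokenizer

tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")
ids = tokenizer("Hello world").input_ids
# A single </s> is prepended rather than appended; see
# the build_inputs_with_special_tokens logic above.
assert tokenizer.decode(ids, skip_special_tokens=True).strip() == "Hello world"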
| 350 | import random
def _partition(data: list, pivot) -> tuple:
    '''simple docstring'''
    less, equal, greater = [], [], []
    for element in data:
        if element < pivot:
            less.append(element)
        elif element > pivot:
            greater.append(element)
        else:
            equal.append(element)
    return less, equal, greater
def quick_select(items: list, index: int):
    '''simple docstring'''
    # To find the median, call with index = len(items) // 2
    # (the value at that position once items is sorted).
    if index >= len(items) or index < 0:  # invalid input
        return None
    pivot = items[random.randint(0, len(items) - 1)]
    smaller, equal, larger = _partition(items, pivot)
    count = len(equal)
    m = len(smaller)
    # index falls on the pivot
    if m <= index < m + count:
        return pivot
    # must be in smaller
    elif m > index:
        return quick_select(smaller, index)
    # must be in larger
    else:
        return quick_select(larger, index - (m + count))
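A short usage check (quick_select returns the k-th smallest element, 0-indexed, in expected linear time):

items = [7, 1, 5, 3, 9]
assert quick_select(items, 0) == 1                # minimum
assert quick_select(items, len(items) // 2) == 5  # median of 5 elements
assert quick_select(items, 99) is None            # out-of-range index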
| 20 | 0 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
@slow
def snake_case__ ( self : Any ):
__snake_case : Tuple = XLMRobertaModel.from_pretrained("""xlm-roberta-base""" )
__snake_case : List[str] = torch.tensor([[0, 5_81, 1_02_69, 83, 9_99_42, 1_36, 6_07_42, 23, 70, 8_05_83, 1_82_76, 2]] )
# The dog is cute and lives in the garden house
__snake_case : Union[str, Any] = torch.Size((1, 12, 7_68) ) # batch_size, sequence_length, embedding_vector_dim
__snake_case : List[str] = torch.tensor(
[[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]] )
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
# xlmr.eval()
# expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
with torch.no_grad():
__snake_case : Dict = model(_lowerCAmelCase )["""last_hidden_state"""].detach()
self.assertEqual(output.shape , _lowerCAmelCase )
# compare the actual values for a slice of last dim
self.assertTrue(torch.allclose(output[:, :, -1] , _lowerCAmelCase , atol=1e-3 ) )
@slow
def snake_case__ ( self : Tuple ):
__snake_case : Union[str, Any] = XLMRobertaModel.from_pretrained("""xlm-roberta-large""" )
__snake_case : Union[str, Any] = torch.tensor([[0, 5_81, 1_02_69, 83, 9_99_42, 1_36, 6_07_42, 23, 70, 8_05_83, 1_82_76, 2]] )
# The dog is cute and lives in the garden house
__snake_case : Union[str, Any] = torch.Size((1, 12, 10_24) ) # batch_size, sequence_length, embedding_vector_dim
__snake_case : Optional[Any] = torch.tensor(
[[-0.0699, -0.0318, 0.0705, -0.1241, 0.0999, -0.0520, 0.1004, -0.1838, -0.4704, 0.1437, 0.0821, 0.0126]] )
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
# xlmr.eval()
# expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
with torch.no_grad():
__snake_case : str = model(_lowerCAmelCase )["""last_hidden_state"""].detach()
self.assertEqual(output.shape , _lowerCAmelCase )
# compare the actual values for a slice of last dim
self.assertTrue(torch.allclose(output[:, :, -1] , _lowerCAmelCase , atol=1e-3 ) )
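Both checks carry the @slow decorator and are skipped in a default test run; in a transformers checkout they are usually enabled with the RUN_SLOW flag. The test path and method names below are the upstream ones and are illustrative here:

# RUN_SLOW=1 python -m pytest tests/models/xlm_roberta/test_modeling_xlm_roberta.py \
#     -k "xlm_roberta_base or xlm_roberta_large"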
| 351 | import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer
lowercase_ = logging.get_logger(__name__)
lowercase_ = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
lowercase_ = {
"vocab_file": {
"junnyu/roformer_chinese_small": "https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt",
"junnyu/roformer_chinese_base": "https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt",
"junnyu/roformer_chinese_char_small": (
"https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt"
),
"junnyu/roformer_chinese_char_base": (
"https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt"
),
"junnyu/roformer_small_discriminator": (
"https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt"
),
"junnyu/roformer_small_generator": (
"https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt"
),
}
}
lowercase_ = {
"junnyu/roformer_chinese_small": 15_36,
"junnyu/roformer_chinese_base": 15_36,
"junnyu/roformer_chinese_char_small": 5_12,
"junnyu/roformer_chinese_char_base": 5_12,
"junnyu/roformer_small_discriminator": 1_28,
"junnyu/roformer_small_generator": 1_28,
}
lowercase_ = {
"junnyu/roformer_chinese_small": {"do_lower_case": True},
"junnyu/roformer_chinese_base": {"do_lower_case": True},
"junnyu/roformer_chinese_char_small": {"do_lower_case": True},
"junnyu/roformer_chinese_char_base": {"do_lower_case": True},
"junnyu/roformer_small_discriminator": {"do_lower_case": True},
"junnyu/roformer_small_generator": {"do_lower_case": True},
}
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase ):
A : Optional[int] = VOCAB_FILES_NAMES
A : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
A : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A : int = PRETRAINED_INIT_CONFIGURATION
A : List[str] = RoFormerTokenizer
def __init__( self : Optional[Any] , _lowerCAmelCase : Dict=None , _lowerCAmelCase : List[Any]=None , _lowerCAmelCase : List[Any]=True , _lowerCAmelCase : Any="[UNK]" , _lowerCAmelCase : int="[SEP]" , _lowerCAmelCase : Optional[int]="[PAD]" , _lowerCAmelCase : Optional[int]="[CLS]" , _lowerCAmelCase : Optional[Any]="[MASK]" , _lowerCAmelCase : Optional[Any]=True , _lowerCAmelCase : Optional[Any]=None , **_lowerCAmelCase : Dict , ):
super().__init__(
_lowerCAmelCase , tokenizer_file=_lowerCAmelCase , do_lower_case=_lowerCAmelCase , unk_token=_lowerCAmelCase , sep_token=_lowerCAmelCase , pad_token=_lowerCAmelCase , cls_token=_lowerCAmelCase , mask_token=_lowerCAmelCase , tokenize_chinese_chars=_lowerCAmelCase , strip_accents=_lowerCAmelCase , **_lowerCAmelCase , )
__snake_case : Dict = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
pre_tok_state.get("""lowercase""" , _lowerCAmelCase ) != do_lower_case
or pre_tok_state.get("""strip_accents""" , _lowerCAmelCase ) != strip_accents
):
__snake_case : Tuple = getattr(_lowerCAmelCase , pre_tok_state.pop("""type""" ) )
__snake_case : List[Any] = do_lower_case
__snake_case : Optional[Any] = strip_accents
__snake_case : List[str] = pre_tok_class(**_lowerCAmelCase )
__snake_case : Optional[Any] = do_lower_case
def __getstate__( self : Optional[Any] ):
__snake_case : Optional[int] = self.__dict__.copy()
__snake_case : Optional[Any] = BertPreTokenizer()
return state
def __setstate__( self : str , _lowerCAmelCase : Dict ):
__snake_case : str = d
__snake_case : int = self.__dict__["""_tokenizer"""].get_vocab()
__snake_case : List[str] = PreTokenizer.custom(JiebaPreTokenizer(_lowerCAmelCase ) )
def snake_case__ ( self : Union[str, Any] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Optional[int]=None ):
__snake_case : Dict = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def snake_case__ ( self : Union[str, Any] , _lowerCAmelCase : List[int] , _lowerCAmelCase : Optional[List[int]] = None ):
__snake_case : Optional[int] = [self.sep_token_id]
__snake_case : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def snake_case__ ( self : Optional[int] , _lowerCAmelCase : str , _lowerCAmelCase : Optional[str] = None ):
__snake_case : int = self._tokenizer.model.save(_lowerCAmelCase , name=_lowerCAmelCase )
return tuple(_lowerCAmelCase )
def snake_case__ ( self : int , _lowerCAmelCase : int , _lowerCAmelCase : Union[str, Any]=None , _lowerCAmelCase : Tuple=None , _lowerCAmelCase : Union[str, Any]=False , **_lowerCAmelCase : Tuple , ):
__snake_case : Tuple = BertPreTokenizer()
return super().save_pretrained(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , **_lowerCAmelCase )
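Assuming the class above is exported as transformers' RoFormerTokenizerFast, the jieba-backed pre-tokenizer splits Chinese text into words before WordPiece; the expected tokens follow the upstream docstring example:

from transformers import RoFormerTokenizerFast

tokenizer = RoFormerTokenizerFast.from_pretrained("junnyu/roformer_chinese_base")
print(tokenizer.tokenize("今天天气非常好。"))
# ['今', '天', '天', '气', '非常', '好', '。']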
| 20 | 0 |
from math import factorial, pi
def maclaurin_sin(theta: float, accuracy: int = 30) -> float:
    '''simple docstring'''
    if not isinstance(theta, (int, float)):
        raise ValueError("""maclaurin_sin() requires either an int or float for theta""")
    if not isinstance(accuracy, int) or accuracy <= 0:
        raise ValueError("""maclaurin_sin() requires a positive int for accuracy""")
    theta = float(theta)
    div = theta // (2 * pi)
    theta -= 2 * div * pi
    return sum(
        (-1) ** r * theta ** (2 * r + 1) / factorial(2 * r + 1) for r in range(accuracy)
    )
def maclaurin_cos(theta: float, accuracy: int = 30) -> float:
    '''simple docstring'''
    if not isinstance(theta, (int, float)):
        raise ValueError("""maclaurin_cos() requires either an int or float for theta""")
    if not isinstance(accuracy, int) or accuracy <= 0:
        raise ValueError("""maclaurin_cos() requires a positive int for accuracy""")
    theta = float(theta)
    div = theta // (2 * pi)
    theta -= 2 * div * pi
    return sum((-1) ** r * theta ** (2 * r) / factorial(2 * r) for r in range(accuracy))
if __name__ == "__main__":
import doctest
doctest.testmod()
print(maclaurin_sin(10))
print(maclaurin_sin(-10))
print(maclaurin_sin(10, 15))
print(maclaurin_sin(-10, 15))
print(maclaurin_cos(5))
print(maclaurin_cos(-5))
print(maclaurin_cos(10, 15))
print(maclaurin_cos(-10, 15))
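Both routines are truncations (to `accuracy` terms) of the Maclaurin expansions, applied after first reducing theta modulo 2*pi:

\sin\theta = \sum_{r=0}^{\infty} \frac{(-1)^r \theta^{2r+1}}{(2r+1)!},
\qquad
\cos\theta = \sum_{r=0}^{\infty} \frac{(-1)^r \theta^{2r}}{(2r)!}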
| 352 | from __future__ import annotations
import math
def minimax(depth: int, node_index: int, is_max: bool, scores: list[int], height: float) -> int:
    '''simple docstring'''
    if depth < 0:
        raise ValueError("""Depth cannot be less than 0""")
    if len(scores) == 0:
        raise ValueError("""Scores cannot be empty""")
    if depth == height:
        return scores[node_index]
    if is_max:
        return max(
            minimax(depth + 1, node_index * 2, False, scores, height),
            minimax(depth + 1, node_index * 2 + 1, False, scores, height),
        )
    return min(
        minimax(depth + 1, node_index * 2, True, scores, height),
        minimax(depth + 1, node_index * 2 + 1, True, scores, height),
    )
def main() -> None:
    '''simple docstring'''
    scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    height = math.log(len(scores), 2)
    print("""Optimal value : """, end="""""")
    print(minimax(0, 0, True, scores, height))
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
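Worked evaluation of the demo tree (root maximises, alternating levels minimise, leaves read left to right):
depth 2 (max over leaf pairs): max(90, 23) = 90, max(6, 33) = 33, max(21, 65) = 65, max(123, 34423) = 34423;
depth 1 (min): min(90, 33) = 33, min(65, 34423) = 65;
depth 0 (max): max(33, 65) = 65, so the script prints "Optimal value : 65".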
| 20 | 0 |
import unittest
from datasets import load_dataset
from transformers import BloomTokenizerFast
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase , unittest.TestCase ):
A : Optional[int] = None
A : str = BloomTokenizerFast
A : Optional[Any] = BloomTokenizerFast
A : str = True
A : Optional[int] = False
A : List[str] = "tokenizer_file"
A : int = {"bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>", "pad_token": "<pad>"}
def snake_case__ ( self : str ):
super().setUp()
__snake_case : List[Any] = BloomTokenizerFast.from_pretrained("""bigscience/tokenizer""" )
tokenizer.save_pretrained(self.tmpdirname )
def snake_case__ ( self : int , **_lowerCAmelCase : List[Any] ):
kwargs.update(self.special_tokens_map )
return BloomTokenizerFast.from_pretrained(self.tmpdirname , **_lowerCAmelCase )
def snake_case__ ( self : Optional[Any] ):
__snake_case : List[str] = self.get_rust_tokenizer()
__snake_case : int = ["""The quick brown fox</s>""", """jumps over the lazy dog</s>"""]
__snake_case : Optional[int] = [[21_75, 2_37_14, 7_31_73, 14_42_52, 2], [77, 13_26_19, 34_78, 3_68, 10_95_86, 3_54_33, 2]]
__snake_case : Optional[int] = tokenizer.batch_encode_plus(_lowerCAmelCase )["""input_ids"""]
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
__snake_case : Optional[Any] = tokenizer.batch_decode(_lowerCAmelCase )
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
def snake_case__ ( self : Dict , _lowerCAmelCase : Dict=6 ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
__snake_case : List[str] = self.rust_tokenizer_class.from_pretrained(_lowerCAmelCase , **_lowerCAmelCase )
# tokenizer_r.pad_token = None # Hotfixing padding = None
# Simple input
__snake_case : int = """This is a simple input"""
__snake_case : List[str] = ["""This is a simple input 1""", """This is a simple input 2"""]
__snake_case : Optional[int] = ("""This is a simple input""", """This is a pair""")
__snake_case : Optional[Any] = [
("""This is a simple input 1""", """This is a simple input 2"""),
("""This is a simple pair 1""", """This is a simple pair 2"""),
]
# Simple input tests
try:
tokenizer_r.encode(_lowerCAmelCase , max_length=_lowerCAmelCase )
tokenizer_r.encode_plus(_lowerCAmelCase , max_length=_lowerCAmelCase )
tokenizer_r.batch_encode_plus(_lowerCAmelCase , max_length=_lowerCAmelCase )
tokenizer_r.encode(_lowerCAmelCase , max_length=_lowerCAmelCase )
tokenizer_r.batch_encode_plus(_lowerCAmelCase , max_length=_lowerCAmelCase )
except ValueError:
self.fail("""Bloom Tokenizer should be able to deal with padding""" )
__snake_case : List[Any] = None # Hotfixing padding = None
self.assertRaises(_lowerCAmelCase , tokenizer_r.encode , _lowerCAmelCase , max_length=_lowerCAmelCase , padding="""max_length""" )
# Simple input
self.assertRaises(_lowerCAmelCase , tokenizer_r.encode_plus , _lowerCAmelCase , max_length=_lowerCAmelCase , padding="""max_length""" )
# Simple input
self.assertRaises(
_lowerCAmelCase , tokenizer_r.batch_encode_plus , _lowerCAmelCase , max_length=_lowerCAmelCase , padding="""max_length""" , )
# Pair input
self.assertRaises(_lowerCAmelCase , tokenizer_r.encode , _lowerCAmelCase , max_length=_lowerCAmelCase , padding="""max_length""" )
# Pair input
self.assertRaises(_lowerCAmelCase , tokenizer_r.encode_plus , _lowerCAmelCase , max_length=_lowerCAmelCase , padding="""max_length""" )
# Pair input
self.assertRaises(
_lowerCAmelCase , tokenizer_r.batch_encode_plus , _lowerCAmelCase , max_length=_lowerCAmelCase , padding="""max_length""" , )
def snake_case__ ( self : List[str] ):
__snake_case : str = self.get_rust_tokenizer()
__snake_case : str = load_dataset("""xnli""" , """all_languages""" , split="""test""" , streaming=_lowerCAmelCase )
__snake_case : List[str] = next(iter(_lowerCAmelCase ) )["""premise"""] # pick up one data
__snake_case : Tuple = list(sample_data.values() )
__snake_case : Any = list(map(tokenizer.encode , _lowerCAmelCase ) )
__snake_case : Optional[int] = [tokenizer.decode(_lowerCAmelCase , clean_up_tokenization_spaces=_lowerCAmelCase ) for x in output_tokens]
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
def snake_case__ ( self : Dict ):
        # The test has to be overridden because BLOOM uses ALiBi positional embeddings, which do not
        # impose any sequence length constraints. The parent class's test would fail since it relies
        # on the maximum sequence length of the positional embeddings.
self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map ) , 1 )
self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values() )[0] ) , 1 )
| 353 | import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
import PIL.Image
from .features import FeatureType
lowercase_ = None
lowercase_ = "<" if sys.byteorder == "little" else ">"
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1", whose values are not preserved correctly when saving and loading an image
lowercase_ = [
np.dtype("|b1"),
np.dtype("|u1"),
np.dtype("<u2"),
np.dtype(">u2"),
np.dtype("<i2"),
np.dtype(">i2"),
np.dtype("<u4"),
np.dtype(">u4"),
np.dtype("<i4"),
np.dtype(">i4"),
np.dtype("<f4"),
np.dtype(">f4"),
np.dtype("<f8"),
np.dtype(">f8"),
]
@dataclass
class SCREAMING_SNAKE_CASE__ :
A : bool = True
A : Optional[str] = None
# Automatically constructed
A : ClassVar[str] = "PIL.Image.Image"
A : ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()} )
A : str = field(default="Image" , init=__UpperCamelCase , repr=__UpperCamelCase )
def __call__( self : Any ):
return self.pa_type
def snake_case__ ( self : List[Any] , _lowerCAmelCase : Union[str, bytes, dict, np.ndarray, "PIL.Image.Image"] ):
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("""To support encoding images, please install 'Pillow'.""" )
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
__snake_case : str = np.array(_lowerCAmelCase )
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
return {"path": value, "bytes": None}
elif isinstance(_lowerCAmelCase , _lowerCAmelCase ):
return {"path": None, "bytes": value}
elif isinstance(_lowerCAmelCase , np.ndarray ):
# convert the image array to PNG/TIFF bytes
return encode_np_array(_lowerCAmelCase )
elif isinstance(_lowerCAmelCase , PIL.Image.Image ):
# convert the PIL image to bytes (default format is PNG/TIFF)
return encode_pil_image(_lowerCAmelCase )
elif value.get("""path""" ) is not None and os.path.isfile(value["""path"""] ):
# we set "bytes": None to not duplicate the data if they're already available locally
return {"bytes": None, "path": value.get("""path""" )}
elif value.get("""bytes""" ) is not None or value.get("""path""" ) is not None:
# store the image bytes, and path is used to infer the image format using the file extension
return {"bytes": value.get("""bytes""" ), "path": value.get("""path""" )}
else:
raise ValueError(
f'''An image sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.''' )
def snake_case__ ( self : List[str] , _lowerCAmelCase : dict , _lowerCAmelCase : Dict=None ):
if not self.decode:
raise RuntimeError("""Decoding is disabled for this feature. Please use Image(decode=True) instead.""" )
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("""To support decoding images, please install 'Pillow'.""" )
if token_per_repo_id is None:
__snake_case : Tuple = {}
__snake_case , __snake_case : str = value["""path"""], value["""bytes"""]
if bytes_ is None:
if path is None:
raise ValueError(f'''An image should have one of \'path\' or \'bytes\' but both are None in {value}.''' )
else:
if is_local_path(_lowerCAmelCase ):
__snake_case : str = PIL.Image.open(_lowerCAmelCase )
else:
__snake_case : List[str] = path.split("""::""" )[-1]
try:
__snake_case : Dict = string_to_dict(_lowerCAmelCase , config.HUB_DATASETS_URL )["""repo_id"""]
__snake_case : int = token_per_repo_id.get(_lowerCAmelCase )
except ValueError:
__snake_case : List[Any] = None
with xopen(_lowerCAmelCase , """rb""" , use_auth_token=_lowerCAmelCase ) as f:
__snake_case : Union[str, Any] = BytesIO(f.read() )
__snake_case : Dict = PIL.Image.open(bytes_ )
else:
__snake_case : Optional[Any] = PIL.Image.open(BytesIO(bytes_ ) )
image.load() # to avoid "Too many open files" errors
return image
def snake_case__ ( self : Union[str, Any] ):
from .features import Value
return (
self
if self.decode
else {
"bytes": Value("""binary""" ),
"path": Value("""string""" ),
}
)
def snake_case__ ( self : Optional[int] , _lowerCAmelCase : Union[pa.StringArray, pa.StructArray, pa.ListArray] ):
if pa.types.is_string(storage.type ):
__snake_case : Optional[Any] = pa.array([None] * len(_lowerCAmelCase ) , type=pa.binary() )
__snake_case : Any = pa.StructArray.from_arrays([bytes_array, storage] , ["""bytes""", """path"""] , mask=storage.is_null() )
elif pa.types.is_binary(storage.type ):
__snake_case : Optional[Any] = pa.array([None] * len(_lowerCAmelCase ) , type=pa.string() )
__snake_case : List[str] = pa.StructArray.from_arrays([storage, path_array] , ["""bytes""", """path"""] , mask=storage.is_null() )
elif pa.types.is_struct(storage.type ):
if storage.type.get_field_index("""bytes""" ) >= 0:
__snake_case : List[str] = storage.field("""bytes""" )
else:
__snake_case : List[Any] = pa.array([None] * len(_lowerCAmelCase ) , type=pa.binary() )
if storage.type.get_field_index("""path""" ) >= 0:
__snake_case : Optional[int] = storage.field("""path""" )
else:
__snake_case : int = pa.array([None] * len(_lowerCAmelCase ) , type=pa.string() )
__snake_case : Tuple = pa.StructArray.from_arrays([bytes_array, path_array] , ["""bytes""", """path"""] , mask=storage.is_null() )
elif pa.types.is_list(storage.type ):
__snake_case : Optional[Any] = pa.array(
[encode_np_array(np.array(_lowerCAmelCase ) )["""bytes"""] if arr is not None else None for arr in storage.to_pylist()] , type=pa.binary() , )
__snake_case : Optional[int] = pa.array([None] * len(_lowerCAmelCase ) , type=pa.string() )
__snake_case : List[str] = pa.StructArray.from_arrays(
[bytes_array, path_array] , ["""bytes""", """path"""] , mask=bytes_array.is_null() )
return array_cast(_lowerCAmelCase , self.pa_type )
def snake_case__ ( self : Union[str, Any] , _lowerCAmelCase : pa.StructArray ):
@no_op_if_value_is_null
def path_to_bytes(_lowerCAmelCase : Tuple ):
with xopen(_lowerCAmelCase , """rb""" ) as f:
__snake_case : Optional[int] = f.read()
return bytes_
__snake_case : Tuple = pa.array(
[
(path_to_bytes(x["""path"""] ) if x["""bytes"""] is None else x["""bytes"""]) if x is not None else None
for x in storage.to_pylist()
] , type=pa.binary() , )
__snake_case : Optional[Any] = pa.array(
[os.path.basename(_lowerCAmelCase ) if path is not None else None for path in storage.field("""path""" ).to_pylist()] , type=pa.string() , )
__snake_case : Any = pa.StructArray.from_arrays([bytes_array, path_array] , ["""bytes""", """path"""] , mask=bytes_array.is_null() )
return array_cast(_lowerCAmelCase , self.pa_type )
def __lowerCAmelCase ( ):
'''simple docstring'''
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("""To support encoding images, please install 'Pillow'.""" )
global _IMAGE_COMPRESSION_FORMATS
if _IMAGE_COMPRESSION_FORMATS is None:
PIL.Image.init()
__snake_case : Optional[Any] = list(set(PIL.Image.OPEN.keys() ) & set(PIL.Image.SAVE.keys() ) )
return _IMAGE_COMPRESSION_FORMATS
def __lowerCAmelCase ( __SCREAMING_SNAKE_CASE : "PIL.Image.Image" ):
'''simple docstring'''
__snake_case : List[Any] = BytesIO()
if image.format in list_image_compression_formats():
__snake_case : Union[str, Any] = image.format
else:
__snake_case : List[Any] = """PNG""" if image.mode in ["""1""", """L""", """LA""", """RGB""", """RGBA"""] else """TIFF"""
image.save(__SCREAMING_SNAKE_CASE , format=__SCREAMING_SNAKE_CASE )
return buffer.getvalue()
def __lowerCAmelCase ( __SCREAMING_SNAKE_CASE : "PIL.Image.Image" ):
'''simple docstring'''
if hasattr(__SCREAMING_SNAKE_CASE , """filename""" ) and image.filename != "":
return {"path": image.filename, "bytes": None}
else:
return {"path": None, "bytes": image_to_bytes(__SCREAMING_SNAKE_CASE )}
def __lowerCAmelCase ( __SCREAMING_SNAKE_CASE : np.ndarray ):
'''simple docstring'''
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("""To support encoding images, please install 'Pillow'.""" )
__snake_case : List[Any] = array.dtype
__snake_case : List[Any] = dtype.byteorder if dtype.byteorder != """=""" else _NATIVE_BYTEORDER
__snake_case : Dict = dtype.kind
__snake_case : Union[str, Any] = dtype.itemsize
__snake_case : Tuple = None
# Multi-channel array case (only np.dtype("|u1") is allowed)
if array.shape[2:]:
__snake_case : int = np.dtype("""|u1""" )
if dtype_kind not in ["u", "i"]:
raise TypeError(
F'''Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays.''' )
if dtype is not dest_dtype:
warnings.warn(F'''Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'''' )
# Exact match
elif dtype in _VALID_IMAGE_ARRAY_DTPYES:
__snake_case : List[str] = dtype
else: # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
while dtype_itemsize >= 1:
__snake_case : int = dtype_byteorder + dtype_kind + str(__SCREAMING_SNAKE_CASE )
__snake_case : Any = np.dtype(__SCREAMING_SNAKE_CASE )
if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES:
warnings.warn(F'''Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'''' )
break
else:
dtype_itemsize //= 2
if dest_dtype is None:
raise TypeError(
F'''Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}''' )
__snake_case : Optional[int] = PIL.Image.fromarray(array.astype(__SCREAMING_SNAKE_CASE ) )
return {"path": None, "bytes": image_to_bytes(__SCREAMING_SNAKE_CASE )}
def __lowerCAmelCase ( __SCREAMING_SNAKE_CASE : Union[List[str], List[dict], List[np.ndarray], List["PIL.Image.Image"]] ):
'''simple docstring'''
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("""To support encoding images, please install 'Pillow'.""" )
if objs:
__snake_case , __snake_case : Any = first_non_null_value(__SCREAMING_SNAKE_CASE )
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
if isinstance(__SCREAMING_SNAKE_CASE , np.ndarray ):
__snake_case : int = no_op_if_value_is_null(__SCREAMING_SNAKE_CASE )
return [obj_to_image_dict_func(__SCREAMING_SNAKE_CASE ) for obj in objs]
elif isinstance(__SCREAMING_SNAKE_CASE , PIL.Image.Image ):
__snake_case : List[str] = no_op_if_value_is_null(__SCREAMING_SNAKE_CASE )
return [obj_to_image_dict_func(__SCREAMING_SNAKE_CASE ) for obj in objs]
else:
return objs
else:
return objs
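Typical use of this feature goes through the public datasets API (the file path below is a placeholder):

from datasets import Dataset, Image

ds = Dataset.from_dict({"image": ["path/to/cat.png"]}).cast_column("image", Image())
pil_image = ds[0]["image"]  # decoded lazily into a PIL.Image.Image
# With decode=False the raw {"bytes": ..., "path": ...} struct is returned instead:
raw = ds.cast_column("image", Image(decode=False))[0]["image"]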
| 20 | 0 |
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, PegasusConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFPegasusForConditionalGeneration, TFPegasusModel
@require_tf
class SCREAMING_SNAKE_CASE__ :
A : List[Any] = PegasusConfig
A : Dict = {}
A : List[str] = "gelu"
def __init__( self : Tuple , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Optional[int]=13 , _lowerCAmelCase : str=7 , _lowerCAmelCase : List[str]=True , _lowerCAmelCase : str=False , _lowerCAmelCase : int=99 , _lowerCAmelCase : Dict=32 , _lowerCAmelCase : str=2 , _lowerCAmelCase : Dict=4 , _lowerCAmelCase : str=37 , _lowerCAmelCase : Tuple=0.1 , _lowerCAmelCase : List[Any]=0.1 , _lowerCAmelCase : Any=40 , _lowerCAmelCase : Dict=2 , _lowerCAmelCase : List[str]=1 , _lowerCAmelCase : Optional[int]=0 , ):
__snake_case : Optional[Any] = parent
__snake_case : str = batch_size
__snake_case : str = seq_length
__snake_case : List[Any] = is_training
__snake_case : Union[str, Any] = use_labels
__snake_case : Optional[int] = vocab_size
__snake_case : Any = hidden_size
__snake_case : str = num_hidden_layers
__snake_case : Optional[Any] = num_attention_heads
__snake_case : str = intermediate_size
__snake_case : int = hidden_dropout_prob
__snake_case : str = attention_probs_dropout_prob
__snake_case : int = max_position_embeddings
__snake_case : Union[str, Any] = eos_token_id
__snake_case : Tuple = pad_token_id
__snake_case : Any = bos_token_id
def snake_case__ ( self : Dict ):
__snake_case : Any = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
__snake_case : Optional[Any] = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
__snake_case : List[str] = tf.concat([input_ids, eos_tensor] , axis=1 )
__snake_case : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__snake_case : str = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
__snake_case : Tuple = prepare_pegasus_inputs_dict(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
return config, inputs_dict
def snake_case__ ( self : List[Any] , _lowerCAmelCase : List[str] , _lowerCAmelCase : List[Any] ):
__snake_case : Optional[Any] = TFPegasusModel(config=_lowerCAmelCase ).get_decoder()
__snake_case : Optional[int] = inputs_dict["""input_ids"""]
__snake_case : Optional[int] = input_ids[:1, :]
__snake_case : Optional[int] = inputs_dict["""attention_mask"""][:1, :]
__snake_case : str = inputs_dict["""head_mask"""]
__snake_case : Tuple = 1
# first forward pass
__snake_case : Optional[int] = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase , head_mask=_lowerCAmelCase , use_cache=_lowerCAmelCase )
__snake_case : int = outputs.to_tuple()
        # create hypothetical next tokens and extend next_input_ids
__snake_case : Any = ids_tensor((self.batch_size, 3) , config.vocab_size )
__snake_case : int = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
        # append the new tokens to input_ids and attention_mask
__snake_case : str = tf.concat([input_ids, next_tokens] , axis=-1 )
__snake_case : List[str] = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
__snake_case : Optional[int] = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase )[0]
__snake_case : str = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase , past_key_values=_lowerCAmelCase )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
__snake_case : Dict = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
__snake_case : List[str] = output_from_no_past[:, -3:, random_slice_idx]
__snake_case : str = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(_lowerCAmelCase , _lowerCAmelCase , rtol=1e-3 )
def __lowerCAmelCase ( __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Dict=None , __SCREAMING_SNAKE_CASE : List[Any]=None , __SCREAMING_SNAKE_CASE : Any=None , __SCREAMING_SNAKE_CASE : int=None , __SCREAMING_SNAKE_CASE : str=None , ):
'''simple docstring'''
if attention_mask is None:
__snake_case : Optional[Any] = tf.cast(tf.math.not_equal(__SCREAMING_SNAKE_CASE , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
__snake_case : Any = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
if head_mask is None:
__snake_case : Dict = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
__snake_case : Tuple = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
__snake_case : List[Any] = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase , __UpperCamelCase , unittest.TestCase ):
A : Dict = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else ()
A : Optional[int] = (TFPegasusForConditionalGeneration,) if is_tf_available() else ()
A : Union[str, Any] = (
{
"conversational": TFPegasusForConditionalGeneration,
"feature-extraction": TFPegasusModel,
"summarization": TFPegasusForConditionalGeneration,
"text2text-generation": TFPegasusForConditionalGeneration,
"translation": TFPegasusForConditionalGeneration,
}
if is_tf_available()
else {}
)
A : Union[str, Any] = True
A : Any = False
A : Dict = False
def snake_case__ ( self : Union[str, Any] ):
__snake_case : Optional[Any] = TFPegasusModelTester(self )
__snake_case : int = ConfigTester(self , config_class=_lowerCAmelCase )
def snake_case__ ( self : Optional[Any] ):
self.config_tester.run_common_tests()
def snake_case__ ( self : str ):
__snake_case : str = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*_lowerCAmelCase )
@require_sentencepiece
@require_tokenizers
@require_tf
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
A : Tuple = [
" PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.",
" The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" ",
]
A : Dict = [
"California's largest electricity provider has cut power to hundreds of thousands of customers in an effort to"
" reduce the risk of wildfires.",
"N-Dubz have revealed they\'re \"grateful\" to have been nominated for four Mobo Awards.",
] # differs slightly from pytorch, likely due to numerical differences in linear layers
A : Union[str, Any] = "google/pegasus-xsum"
@cached_property
def snake_case__ ( self : Optional[int] ):
return AutoTokenizer.from_pretrained(self.model_name )
@cached_property
def snake_case__ ( self : List[str] ):
__snake_case : str = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
def snake_case__ ( self : Tuple , **_lowerCAmelCase : Optional[Any] ):
__snake_case : Dict = self.translate_src_text(**_lowerCAmelCase )
assert self.expected_text == generated_words
def snake_case__ ( self : Optional[Any] , **_lowerCAmelCase : Optional[int] ):
__snake_case : Tuple = self.tokenizer(self.src_text , **_lowerCAmelCase , padding=_lowerCAmelCase , return_tensors="""tf""" )
__snake_case : Dict = self.model.generate(
model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=_lowerCAmelCase , )
__snake_case : Any = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=_lowerCAmelCase )
return generated_words
@slow
def snake_case__ ( self : int ):
self._assert_generated_batch_equal_expected()
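# Editor's sketch, not part of the dataset row above: the generation path the
# TFPegasus integration test exercises, written against the public transformers
# API with readable names. Only "google/pegasus-xsum" and num_beams=2 come from
# the test itself; the input sentence is a shortened stand-in.
from transformers import AutoTokenizer, TFAutoModelForSeq2SeqLM

pegasus_tokenizer = AutoTokenizer.from_pretrained("google/pegasus-xsum")
pegasus_model = TFAutoModelForSeq2SeqLM.from_pretrained("google/pegasus-xsum")
batch = pegasus_tokenizer(
    ["PG&E scheduled the blackouts in response to forecasts for high winds."],
    truncation=True,
    padding="longest",
    return_tensors="tf",
)
summary_ids = pegasus_model.generate(
    batch.input_ids, attention_mask=batch.attention_mask, num_beams=2
)
print(pegasus_tokenizer.batch_decode(summary_ids, skip_special_tokens=True))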
| 354 | from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING
lowercase_ = logging.get_logger(__name__)
@add_end_docstrings(__UpperCamelCase )
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase ):
def __init__( self : Dict , *_lowerCAmelCase : Dict , **_lowerCAmelCase : int ):
super().__init__(*_lowerCAmelCase , **_lowerCAmelCase )
requires_backends(self , """vision""" )
self.check_model_type(
TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == """tf""" else MODEL_FOR_VISION_2_SEQ_MAPPING )
def snake_case__ ( self : List[Any] , _lowerCAmelCase : List[str]=None , _lowerCAmelCase : Any=None , _lowerCAmelCase : List[str]=None ):
__snake_case : Optional[Any] = {}
__snake_case : int = {}
if prompt is not None:
__snake_case : Dict = prompt
if generate_kwargs is not None:
__snake_case : List[Any] = generate_kwargs
if max_new_tokens is not None:
if "generate_kwargs" not in forward_kwargs:
__snake_case : Optional[int] = {}
if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
raise ValueError(
"""'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter,"""
""" please use only one""" )
__snake_case : Any = max_new_tokens
return preprocess_params, forward_kwargs, {}
def __call__( self : Optional[Any] , _lowerCAmelCase : Union[str, List[str], "Image.Image", List["Image.Image"]] , **_lowerCAmelCase : Union[str, Any] ):
return super().__call__(_lowerCAmelCase , **_lowerCAmelCase )
def snake_case__ ( self : Optional[Any] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : List[str]=None ):
__snake_case : Optional[Any] = load_image(_lowerCAmelCase )
if prompt is not None:
if not isinstance(_lowerCAmelCase , _lowerCAmelCase ):
raise ValueError(
f'''Received an invalid text input, got - {type(_lowerCAmelCase )} - but expected a single string. '''
"""Note also that one single text can be provided for conditional image to text generation.""" )
__snake_case : Tuple = self.model.config.model_type
if model_type == "git":
__snake_case : Tuple = self.image_processor(images=_lowerCAmelCase , return_tensors=self.framework )
__snake_case : Any = self.tokenizer(text=_lowerCAmelCase , add_special_tokens=_lowerCAmelCase ).input_ids
__snake_case : Tuple = [self.tokenizer.cls_token_id] + input_ids
__snake_case : int = torch.tensor(_lowerCAmelCase ).unsqueeze(0 )
model_inputs.update({"""input_ids""": input_ids} )
elif model_type == "pix2struct":
__snake_case : Dict = self.image_processor(images=_lowerCAmelCase , header_text=_lowerCAmelCase , return_tensors=self.framework )
elif model_type != "vision-encoder-decoder":
# vision-encoder-decoder does not support conditional generation
__snake_case : int = self.image_processor(images=_lowerCAmelCase , return_tensors=self.framework )
__snake_case : Optional[Any] = self.tokenizer(_lowerCAmelCase , return_tensors=self.framework )
model_inputs.update(_lowerCAmelCase )
else:
raise ValueError(f'''Model type {model_type} does not support conditional text generation''' )
else:
__snake_case : Tuple = self.image_processor(images=_lowerCAmelCase , return_tensors=self.framework )
if self.model.config.model_type == "git" and prompt is None:
__snake_case : int = None
return model_inputs
def snake_case__ ( self : Union[str, Any] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : str=None ):
        # Git model sets `model_inputs["input_ids"] = None` in `preprocess` (when `prompt=None`). In batch mode, the
        # pipeline will group them into a list of `None`, which fails `_forward`. Avoid this by checking it first.
if (
"input_ids" in model_inputs
and isinstance(model_inputs["""input_ids"""] , _lowerCAmelCase )
and all(x is None for x in model_inputs["""input_ids"""] )
):
__snake_case : List[Any] = None
if generate_kwargs is None:
__snake_case : Dict = {}
# FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
# parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
# the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
# in the `_prepare_model_inputs` method.
__snake_case : Dict = model_inputs.pop(self.model.main_input_name )
__snake_case : Optional[int] = self.model.generate(_lowerCAmelCase , **_lowerCAmelCase , **_lowerCAmelCase )
return model_outputs
def snake_case__ ( self : List[Any] , _lowerCAmelCase : str ):
__snake_case : Union[str, Any] = []
for output_ids in model_outputs:
__snake_case : Union[str, Any] = {
"""generated_text""": self.tokenizer.decode(
_lowerCAmelCase , skip_special_tokens=_lowerCAmelCase , )
}
records.append(_lowerCAmelCase )
return records
| 20 | 0 |
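# Editor's sketch, not part of the dataset rows around it: typical use of the
# image-to-text pipeline implemented above. The checkpoint is just one example
# of a compatible captioning model, and the image path is a placeholder.
from transformers import pipeline

captioner = pipeline("image-to-text", model="nlpconnect/vit-gpt2-image-captioning")
print(captioner("path/to/photo.png"))  # -> [{"generated_text": "..."}]
# GIT- and Pix2Struct-style checkpoints also accept a text prompt, matching the
# `prompt` handling in the pipeline's preprocess():
# captioner("path/to/photo.png", prompt="a photography of", generate_kwargs={"max_new_tokens": 20})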
import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
import PIL.Image
from .features import FeatureType
lowercase_ = None
lowercase_ = "<" if sys.byteorder == "little" else ">"
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1", whose values are not preserved correctly when saving and loading an image
lowercase_ = [
np.dtype("|b1"),
np.dtype("|u1"),
np.dtype("<u2"),
np.dtype(">u2"),
np.dtype("<i2"),
np.dtype(">i2"),
np.dtype("<u4"),
np.dtype(">u4"),
np.dtype("<i4"),
np.dtype(">i4"),
np.dtype("<f4"),
np.dtype(">f4"),
np.dtype("<f8"),
np.dtype(">f8"),
]
@dataclass
class SCREAMING_SNAKE_CASE__ :
A : bool = True
A : Optional[str] = None
# Automatically constructed
A : ClassVar[str] = "PIL.Image.Image"
A : ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()} )
A : str = field(default="Image" , init=__UpperCamelCase , repr=__UpperCamelCase )
def __call__( self : Any ):
return self.pa_type
def snake_case__ ( self : List[Any] , _lowerCAmelCase : Union[str, bytes, dict, np.ndarray, "PIL.Image.Image"] ):
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("""To support encoding images, please install 'Pillow'.""" )
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
__snake_case : str = np.array(_lowerCAmelCase )
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
return {"path": value, "bytes": None}
elif isinstance(_lowerCAmelCase , _lowerCAmelCase ):
return {"path": None, "bytes": value}
elif isinstance(_lowerCAmelCase , np.ndarray ):
# convert the image array to PNG/TIFF bytes
return encode_np_array(_lowerCAmelCase )
elif isinstance(_lowerCAmelCase , PIL.Image.Image ):
# convert the PIL image to bytes (default format is PNG/TIFF)
return encode_pil_image(_lowerCAmelCase )
elif value.get("""path""" ) is not None and os.path.isfile(value["""path"""] ):
# we set "bytes": None to not duplicate the data if they're already available locally
return {"bytes": None, "path": value.get("""path""" )}
elif value.get("""bytes""" ) is not None or value.get("""path""" ) is not None:
# store the image bytes, and path is used to infer the image format using the file extension
return {"bytes": value.get("""bytes""" ), "path": value.get("""path""" )}
else:
raise ValueError(
f'''An image sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.''' )
def snake_case__ ( self : List[str] , _lowerCAmelCase : dict , _lowerCAmelCase : Dict=None ):
if not self.decode:
raise RuntimeError("""Decoding is disabled for this feature. Please use Image(decode=True) instead.""" )
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("""To support decoding images, please install 'Pillow'.""" )
if token_per_repo_id is None:
__snake_case : Tuple = {}
        __snake_case , __snake_case : str = value["""path"""], value["""bytes"""]
if bytes_ is None:
if path is None:
raise ValueError(f'''An image should have one of \'path\' or \'bytes\' but both are None in {value}.''' )
else:
if is_local_path(_lowerCAmelCase ):
__snake_case : str = PIL.Image.open(_lowerCAmelCase )
else:
__snake_case : List[str] = path.split("""::""" )[-1]
try:
__snake_case : Dict = string_to_dict(_lowerCAmelCase , config.HUB_DATASETS_URL )["""repo_id"""]
__snake_case : int = token_per_repo_id.get(_lowerCAmelCase )
except ValueError:
__snake_case : List[Any] = None
with xopen(_lowerCAmelCase , """rb""" , use_auth_token=_lowerCAmelCase ) as f:
__snake_case : Union[str, Any] = BytesIO(f.read() )
__snake_case : Dict = PIL.Image.open(bytes_ )
else:
__snake_case : Optional[Any] = PIL.Image.open(BytesIO(bytes_ ) )
image.load() # to avoid "Too many open files" errors
return image
def snake_case__ ( self : Union[str, Any] ):
from .features import Value
return (
self
if self.decode
else {
"bytes": Value("""binary""" ),
"path": Value("""string""" ),
}
)
def snake_case__ ( self : Optional[int] , _lowerCAmelCase : Union[pa.StringArray, pa.StructArray, pa.ListArray] ):
if pa.types.is_string(storage.type ):
__snake_case : Optional[Any] = pa.array([None] * len(_lowerCAmelCase ) , type=pa.binary() )
__snake_case : Any = pa.StructArray.from_arrays([bytes_array, storage] , ["""bytes""", """path"""] , mask=storage.is_null() )
elif pa.types.is_binary(storage.type ):
__snake_case : Optional[Any] = pa.array([None] * len(_lowerCAmelCase ) , type=pa.string() )
__snake_case : List[str] = pa.StructArray.from_arrays([storage, path_array] , ["""bytes""", """path"""] , mask=storage.is_null() )
elif pa.types.is_struct(storage.type ):
if storage.type.get_field_index("""bytes""" ) >= 0:
__snake_case : List[str] = storage.field("""bytes""" )
else:
__snake_case : List[Any] = pa.array([None] * len(_lowerCAmelCase ) , type=pa.binary() )
if storage.type.get_field_index("""path""" ) >= 0:
__snake_case : Optional[int] = storage.field("""path""" )
else:
__snake_case : int = pa.array([None] * len(_lowerCAmelCase ) , type=pa.string() )
__snake_case : Tuple = pa.StructArray.from_arrays([bytes_array, path_array] , ["""bytes""", """path"""] , mask=storage.is_null() )
elif pa.types.is_list(storage.type ):
__snake_case : Optional[Any] = pa.array(
[encode_np_array(np.array(_lowerCAmelCase ) )["""bytes"""] if arr is not None else None for arr in storage.to_pylist()] , type=pa.binary() , )
__snake_case : Optional[int] = pa.array([None] * len(_lowerCAmelCase ) , type=pa.string() )
__snake_case : List[str] = pa.StructArray.from_arrays(
[bytes_array, path_array] , ["""bytes""", """path"""] , mask=bytes_array.is_null() )
return array_cast(_lowerCAmelCase , self.pa_type )
def snake_case__ ( self : Union[str, Any] , _lowerCAmelCase : pa.StructArray ):
@no_op_if_value_is_null
def path_to_bytes(_lowerCAmelCase : Tuple ):
with xopen(_lowerCAmelCase , """rb""" ) as f:
__snake_case : Optional[int] = f.read()
return bytes_
__snake_case : Tuple = pa.array(
[
(path_to_bytes(x["""path"""] ) if x["""bytes"""] is None else x["""bytes"""]) if x is not None else None
for x in storage.to_pylist()
] , type=pa.binary() , )
__snake_case : Optional[Any] = pa.array(
[os.path.basename(_lowerCAmelCase ) if path is not None else None for path in storage.field("""path""" ).to_pylist()] , type=pa.string() , )
__snake_case : Any = pa.StructArray.from_arrays([bytes_array, path_array] , ["""bytes""", """path"""] , mask=bytes_array.is_null() )
return array_cast(_lowerCAmelCase , self.pa_type )
def __lowerCAmelCase ( ):
'''simple docstring'''
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("""To support encoding images, please install 'Pillow'.""" )
global _IMAGE_COMPRESSION_FORMATS
if _IMAGE_COMPRESSION_FORMATS is None:
PIL.Image.init()
__snake_case : Optional[Any] = list(set(PIL.Image.OPEN.keys() ) & set(PIL.Image.SAVE.keys() ) )
return _IMAGE_COMPRESSION_FORMATS
def __lowerCAmelCase ( __SCREAMING_SNAKE_CASE : "PIL.Image.Image" ):
'''simple docstring'''
__snake_case : List[Any] = BytesIO()
if image.format in list_image_compression_formats():
__snake_case : Union[str, Any] = image.format
else:
__snake_case : List[Any] = """PNG""" if image.mode in ["""1""", """L""", """LA""", """RGB""", """RGBA"""] else """TIFF"""
image.save(__SCREAMING_SNAKE_CASE , format=__SCREAMING_SNAKE_CASE )
return buffer.getvalue()
def __lowerCAmelCase ( __SCREAMING_SNAKE_CASE : "PIL.Image.Image" ):
'''simple docstring'''
if hasattr(__SCREAMING_SNAKE_CASE , """filename""" ) and image.filename != "":
return {"path": image.filename, "bytes": None}
else:
return {"path": None, "bytes": image_to_bytes(__SCREAMING_SNAKE_CASE )}
def __lowerCAmelCase ( __SCREAMING_SNAKE_CASE : np.ndarray ):
'''simple docstring'''
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("""To support encoding images, please install 'Pillow'.""" )
__snake_case : List[Any] = array.dtype
__snake_case : List[Any] = dtype.byteorder if dtype.byteorder != """=""" else _NATIVE_BYTEORDER
__snake_case : Dict = dtype.kind
__snake_case : Union[str, Any] = dtype.itemsize
__snake_case : Tuple = None
# Multi-channel array case (only np.dtype("|u1") is allowed)
if array.shape[2:]:
__snake_case : int = np.dtype("""|u1""" )
if dtype_kind not in ["u", "i"]:
raise TypeError(
F'''Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays.''' )
if dtype is not dest_dtype:
warnings.warn(F'''Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'''' )
# Exact match
elif dtype in _VALID_IMAGE_ARRAY_DTPYES:
__snake_case : List[str] = dtype
else: # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
while dtype_itemsize >= 1:
__snake_case : int = dtype_byteorder + dtype_kind + str(__SCREAMING_SNAKE_CASE )
__snake_case : Any = np.dtype(__SCREAMING_SNAKE_CASE )
if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES:
warnings.warn(F'''Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'''' )
break
else:
dtype_itemsize //= 2
if dest_dtype is None:
raise TypeError(
F'''Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}''' )
__snake_case : Optional[int] = PIL.Image.fromarray(array.astype(__SCREAMING_SNAKE_CASE ) )
return {"path": None, "bytes": image_to_bytes(__SCREAMING_SNAKE_CASE )}
def __lowerCAmelCase ( __SCREAMING_SNAKE_CASE : Union[List[str], List[dict], List[np.ndarray], List["PIL.Image.Image"]] ):
'''simple docstring'''
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("""To support encoding images, please install 'Pillow'.""" )
if objs:
__snake_case : Any = first_non_null_value(__SCREAMING_SNAKE_CASE )
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
if isinstance(__SCREAMING_SNAKE_CASE , np.ndarray ):
__snake_case : int = no_op_if_value_is_null(__SCREAMING_SNAKE_CASE )
return [obj_to_image_dict_func(__SCREAMING_SNAKE_CASE ) for obj in objs]
elif isinstance(__SCREAMING_SNAKE_CASE , PIL.Image.Image ):
__snake_case : List[str] = no_op_if_value_is_null(__SCREAMING_SNAKE_CASE )
return [obj_to_image_dict_func(__SCREAMING_SNAKE_CASE ) for obj in objs]
else:
return objs
else:
return objs
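# Editor's sketch, not part of the dataset rows around it: the Image feature
# above in typical use. Requires Pillow; the file path is a placeholder, and
# accessing a decoded row actually opens the file, so use a real image there.
from datasets import Dataset, Image

ds = Dataset.from_dict({"image": ["path/to/photo.png"]}).cast_column("image", Image())
print(ds[0]["image"])  # decode=True (default): a lazily opened PIL.Image.Image
ds_raw = ds.cast_column("image", Image(decode=False))
print(ds_raw[0]["image"])  # {"bytes": None, "path": "path/to/photo.png"}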
| 355 | from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
lowercase_ = {"configuration_yolos": ["YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP", "YolosConfig", "YolosOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ["YolosFeatureExtractor"]
lowercase_ = ["YolosImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
"YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST",
"YolosForObjectDetection",
"YolosModel",
"YolosPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_yolos import YolosFeatureExtractor
from .image_processing_yolos import YolosImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_yolos import (
YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST,
YolosForObjectDetection,
YolosModel,
YolosPreTrainedModel,
)
else:
import sys
lowercase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 20 | 0 |
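# Editor's note, not part of the dataset rows around it: the YOLOS __init__
# above (and the similar Blenderbot-Small and ViT inits later in this dump)
# uses transformers' _LazyModule pattern: importing the package is cheap, and
# the heavy submodules are loaded only on first attribute access.
import transformers.models.yolos as yolos  # no torch/vision imports happen yet

detector_cls = yolos.YolosForObjectDetection  # this line triggers the real import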
from __future__ import annotations
def __lowerCAmelCase ( __SCREAMING_SNAKE_CASE : list[int] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
if (direction == 1 and array[indexa] > array[indexa]) or (
direction == 0 and array[indexa] < array[indexa]
):
        __snake_case , __snake_case : str = array[indexa], array[indexa]
def __lowerCAmelCase ( __SCREAMING_SNAKE_CASE : list[int] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
if length > 1:
__snake_case : Tuple = int(length / 2 )
for i in range(__SCREAMING_SNAKE_CASE , low + middle ):
comp_and_swap(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , i + middle , __SCREAMING_SNAKE_CASE )
bitonic_merge(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
bitonic_merge(__SCREAMING_SNAKE_CASE , low + middle , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( __SCREAMING_SNAKE_CASE : list[int] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
if length > 1:
__snake_case : Optional[Any] = int(length / 2 )
bitonic_sort(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , 1 )
bitonic_sort(__SCREAMING_SNAKE_CASE , low + middle , __SCREAMING_SNAKE_CASE , 0 )
bitonic_merge(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
lowercase_ = input("Enter numbers separated by a comma:\n").strip()
lowercase_ = [int(item.strip()) for item in user_input.split(",")]
bitonic_sort(unsorted, 0, len(unsorted), 1)
print("\nSorted array in ascending order is: ", end="")
print(*unsorted, sep=", ")
bitonic_merge(unsorted, 0, len(unsorted), 0)
print("Sorted array in descending order is: ", end="")
print(*unsorted, sep=", ")
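# Editor's sketch, not part of the dataset rows around it: a self-contained,
# readable rewrite of the bitonic sort above. Note a real constraint the
# interactive driver does not enforce: bitonic sort only sorts correctly when
# the input length is a power of two.
def bitonic(values: list[int], low: int, length: int, ascending: bool) -> None:
    if length <= 1:
        return
    mid = length // 2
    bitonic(values, low, mid, True)         # sort first half ascending
    bitonic(values, low + mid, mid, False)  # second half descending -> bitonic
    bitonic_merge_clean(values, low, length, ascending)

def bitonic_merge_clean(values: list[int], low: int, length: int, ascending: bool) -> None:
    if length <= 1:
        return
    mid = length // 2
    for i in range(low, low + mid):
        if (values[i] > values[i + mid]) == ascending:
            values[i], values[i + mid] = values[i + mid], values[i]
    bitonic_merge_clean(values, low, mid, ascending)
    bitonic_merge_clean(values, low + mid, mid, ascending)

data = [12, 42, 9, 6]  # length must be a power of two
bitonic(data, 0, len(data), True)
assert data == [6, 9, 12, 42]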
| 356 | # Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.speechta import SpeechTaForTextToSpeech, SpeechTaHifiGan, SpeechTaProcessor
from ..utils import is_datasets_available
from .base import PipelineTool
if is_datasets_available():
from datasets import load_dataset
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase ):
A : Optional[int] = "microsoft/speecht5_tts"
A : List[Any] = (
"This is a tool that reads an English text out loud. It takes an input named `text` which should contain the "
"text to read (in English) and returns a waveform object containing the sound."
)
A : str = "text_reader"
A : Optional[Any] = SpeechTaProcessor
A : Any = SpeechTaForTextToSpeech
A : Optional[Any] = SpeechTaHifiGan
A : str = ["text"]
A : Union[str, Any] = ["audio"]
def snake_case__ ( self : List[Any] ):
if self.post_processor is None:
__snake_case : Tuple = """microsoft/speecht5_hifigan"""
super().setup()
def snake_case__ ( self : Any , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Tuple=None ):
__snake_case : str = self.pre_processor(text=_lowerCAmelCase , return_tensors="""pt""" , truncation=_lowerCAmelCase )
if speaker_embeddings is None:
if not is_datasets_available():
raise ImportError("""Datasets needs to be installed if not passing speaker embeddings.""" )
__snake_case : List[Any] = load_dataset("""Matthijs/cmu-arctic-xvectors""" , split="""validation""" )
__snake_case : str = torch.tensor(embeddings_dataset[73_05]["""xvector"""] ).unsqueeze(0 )
return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings}
def snake_case__ ( self : List[Any] , _lowerCAmelCase : Dict ):
with torch.no_grad():
return self.model.generate_speech(**_lowerCAmelCase )
def snake_case__ ( self : Union[str, Any] , _lowerCAmelCase : int ):
with torch.no_grad():
return self.post_processor(_lowerCAmelCase ).cpu().detach()
| 20 | 0 |
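# Editor's sketch, not part of the dataset rows around it: what the
# text-to-speech tool above does, written against the public transformers API.
# The checkpoint names, the x-vector dataset, and row index 7305 all come from
# the tool's own source.
import torch
from datasets import load_dataset
from transformers import SpeechT5ForTextToSpeech, SpeechT5HifiGan, SpeechT5Processor

processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_tts")
tts = SpeechT5ForTextToSpeech.from_pretrained("microsoft/speecht5_tts")
vocoder = SpeechT5HifiGan.from_pretrained("microsoft/speecht5_hifigan")

inputs = processor(text="Hello, world.", return_tensors="pt")
xvectors = load_dataset("Matthijs/cmu-arctic-xvectors", split="validation")
speaker_embeddings = torch.tensor(xvectors[7305]["xvector"]).unsqueeze(0)
with torch.no_grad():
    waveform = tts.generate_speech(inputs["input_ids"], speaker_embeddings, vocoder=vocoder)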
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
Pipeline,
ZeroShotClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
lowercase_ = {"LayoutLMv2Config", "LayoutLMv3Config"}
@is_pipeline_test
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
A : int = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
A : Tuple = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if model_mapping is not None:
A : str = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
if tf_model_mapping is not None:
A : Dict = {
config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
}
def snake_case__ ( self : Optional[Any] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : int ):
__snake_case : List[str] = ZeroShotClassificationPipeline(
model=_lowerCAmelCase , tokenizer=_lowerCAmelCase , candidate_labels=["""polics""", """health"""] )
return classifier, ["Who are you voting for in 2020?", "My stomach hurts."]
def snake_case__ ( self : Optional[Any] , _lowerCAmelCase : str , _lowerCAmelCase : Optional[Any] ):
__snake_case : Any = classifier("""Who are you voting for in 2020?""" , candidate_labels="""politics""" )
self.assertEqual(_lowerCAmelCase , {"""sequence""": ANY(_lowerCAmelCase ), """labels""": [ANY(_lowerCAmelCase )], """scores""": [ANY(_lowerCAmelCase )]} )
# No kwarg
__snake_case : Union[str, Any] = classifier("""Who are you voting for in 2020?""" , ["""politics"""] )
self.assertEqual(_lowerCAmelCase , {"""sequence""": ANY(_lowerCAmelCase ), """labels""": [ANY(_lowerCAmelCase )], """scores""": [ANY(_lowerCAmelCase )]} )
__snake_case : Optional[int] = classifier("""Who are you voting for in 2020?""" , candidate_labels=["""politics"""] )
self.assertEqual(_lowerCAmelCase , {"""sequence""": ANY(_lowerCAmelCase ), """labels""": [ANY(_lowerCAmelCase )], """scores""": [ANY(_lowerCAmelCase )]} )
__snake_case : Optional[Any] = classifier("""Who are you voting for in 2020?""" , candidate_labels="""politics, public health""" )
self.assertEqual(
_lowerCAmelCase , {"""sequence""": ANY(_lowerCAmelCase ), """labels""": [ANY(_lowerCAmelCase ), ANY(_lowerCAmelCase )], """scores""": [ANY(_lowerCAmelCase ), ANY(_lowerCAmelCase )]} )
self.assertAlmostEqual(sum(nested_simplify(outputs["""scores"""] ) ) , 1.0 )
__snake_case : List[Any] = classifier("""Who are you voting for in 2020?""" , candidate_labels=["""politics""", """public health"""] )
self.assertEqual(
_lowerCAmelCase , {"""sequence""": ANY(_lowerCAmelCase ), """labels""": [ANY(_lowerCAmelCase ), ANY(_lowerCAmelCase )], """scores""": [ANY(_lowerCAmelCase ), ANY(_lowerCAmelCase )]} )
self.assertAlmostEqual(sum(nested_simplify(outputs["""scores"""] ) ) , 1.0 )
__snake_case : Tuple = classifier(
"""Who are you voting for in 2020?""" , candidate_labels="""politics""" , hypothesis_template="""This text is about {}""" )
self.assertEqual(_lowerCAmelCase , {"""sequence""": ANY(_lowerCAmelCase ), """labels""": [ANY(_lowerCAmelCase )], """scores""": [ANY(_lowerCAmelCase )]} )
# https://github.com/huggingface/transformers/issues/13846
__snake_case : str = classifier(["""I am happy"""] , ["""positive""", """negative"""] )
self.assertEqual(
_lowerCAmelCase , [
{"""sequence""": ANY(_lowerCAmelCase ), """labels""": [ANY(_lowerCAmelCase ), ANY(_lowerCAmelCase )], """scores""": [ANY(_lowerCAmelCase ), ANY(_lowerCAmelCase )]}
for i in range(1 )
] , )
__snake_case : Optional[int] = classifier(["""I am happy""", """I am sad"""] , ["""positive""", """negative"""] )
self.assertEqual(
_lowerCAmelCase , [
{"""sequence""": ANY(_lowerCAmelCase ), """labels""": [ANY(_lowerCAmelCase ), ANY(_lowerCAmelCase )], """scores""": [ANY(_lowerCAmelCase ), ANY(_lowerCAmelCase )]}
for i in range(2 )
] , )
with self.assertRaises(_lowerCAmelCase ):
classifier("""""" , candidate_labels="""politics""" )
with self.assertRaises(_lowerCAmelCase ):
classifier(_lowerCAmelCase , candidate_labels="""politics""" )
with self.assertRaises(_lowerCAmelCase ):
classifier("""Who are you voting for in 2020?""" , candidate_labels="""""" )
with self.assertRaises(_lowerCAmelCase ):
classifier("""Who are you voting for in 2020?""" , candidate_labels=_lowerCAmelCase )
with self.assertRaises(_lowerCAmelCase ):
classifier(
"""Who are you voting for in 2020?""" , candidate_labels="""politics""" , hypothesis_template="""Not formatting template""" , )
with self.assertRaises(_lowerCAmelCase ):
classifier(
"""Who are you voting for in 2020?""" , candidate_labels="""politics""" , hypothesis_template=_lowerCAmelCase , )
self.run_entailment_id(_lowerCAmelCase )
def snake_case__ ( self : Tuple , _lowerCAmelCase : Pipeline ):
__snake_case : Tuple = zero_shot_classifier.model.config
__snake_case : int = config.labelaid
__snake_case : Any = zero_shot_classifier.entailment_id
__snake_case : List[Any] = {"""LABEL_0""": 0, """LABEL_1""": 1, """LABEL_2""": 2}
self.assertEqual(zero_shot_classifier.entailment_id , -1 )
__snake_case : Tuple = {"""entailment""": 0, """neutral""": 1, """contradiction""": 2}
self.assertEqual(zero_shot_classifier.entailment_id , 0 )
__snake_case : Optional[int] = {"""ENTAIL""": 0, """NON-ENTAIL""": 1}
self.assertEqual(zero_shot_classifier.entailment_id , 0 )
__snake_case : int = {"""ENTAIL""": 2, """NEUTRAL""": 1, """CONTR""": 0}
self.assertEqual(zero_shot_classifier.entailment_id , 2 )
__snake_case : List[Any] = original_labelaid
self.assertEqual(_lowerCAmelCase , zero_shot_classifier.entailment_id )
@require_torch
def snake_case__ ( self : int ):
__snake_case : Dict = pipeline(
"""zero-shot-classification""" , model="""sshleifer/tiny-distilbert-base-cased-distilled-squad""" , framework="""pt""" , )
# There was a regression in 4.10 for this
# Adding a test so we don't make the mistake again.
# https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499
zero_shot_classifier(
"""Who are you voting for in 2020?""" * 1_00 , candidate_labels=["""politics""", """public health""", """science"""] )
@require_torch
def snake_case__ ( self : Optional[int] ):
__snake_case : str = pipeline(
"""zero-shot-classification""" , model="""sshleifer/tiny-distilbert-base-cased-distilled-squad""" , framework="""pt""" , )
__snake_case : int = zero_shot_classifier(
"""Who are you voting for in 2020?""" , candidate_labels=["""politics""", """public health""", """science"""] )
self.assertEqual(
nested_simplify(_lowerCAmelCase ) , {
"""sequence""": """Who are you voting for in 2020?""",
"""labels""": ["""science""", """public health""", """politics"""],
"""scores""": [0.333, 0.333, 0.333],
} , )
@require_tf
def snake_case__ ( self : Optional[int] ):
__snake_case : Tuple = pipeline(
"""zero-shot-classification""" , model="""sshleifer/tiny-distilbert-base-cased-distilled-squad""" , framework="""tf""" , )
__snake_case : List[str] = zero_shot_classifier(
"""Who are you voting for in 2020?""" , candidate_labels=["""politics""", """public health""", """science"""] )
self.assertEqual(
nested_simplify(_lowerCAmelCase ) , {
"""sequence""": """Who are you voting for in 2020?""",
"""labels""": ["""science""", """public health""", """politics"""],
"""scores""": [0.333, 0.333, 0.333],
} , )
@slow
@require_torch
def snake_case__ ( self : Optional[Any] ):
__snake_case : List[Any] = pipeline("""zero-shot-classification""" , model="""roberta-large-mnli""" , framework="""pt""" )
__snake_case : Any = zero_shot_classifier(
"""Who are you voting for in 2020?""" , candidate_labels=["""politics""", """public health""", """science"""] )
self.assertEqual(
nested_simplify(_lowerCAmelCase ) , {
"""sequence""": """Who are you voting for in 2020?""",
"""labels""": ["""politics""", """public health""", """science"""],
"""scores""": [0.976, 0.015, 0.009],
} , )
__snake_case : List[Any] = zero_shot_classifier(
"""The dominant sequence transduction models are based on complex recurrent or convolutional neural networks"""
""" in an encoder-decoder configuration. The best performing models also connect the encoder and decoder"""
""" through an attention mechanism. We propose a new simple network architecture, the Transformer, based"""
""" solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two"""
""" machine translation tasks show these models to be superior in quality while being more parallelizable"""
""" and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014"""
""" English-to-German translation task, improving over the existing best results, including ensembles by"""
""" over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new"""
""" single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small"""
""" fraction of the training costs of the best models from the literature. We show that the Transformer"""
""" generalizes well to other tasks by applying it successfully to English constituency parsing both with"""
""" large and limited training data.""" , candidate_labels=["""machine learning""", """statistics""", """translation""", """vision"""] , multi_label=_lowerCAmelCase , )
self.assertEqual(
nested_simplify(_lowerCAmelCase ) , {
"""sequence""": (
"""The dominant sequence transduction models are based on complex recurrent or convolutional neural"""
""" networks in an encoder-decoder configuration. The best performing models also connect the"""
""" encoder and decoder through an attention mechanism. We propose a new simple network"""
""" architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence"""
""" and convolutions entirely. Experiments on two machine translation tasks show these models to be"""
""" superior in quality while being more parallelizable and requiring significantly less time to"""
""" train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,"""
""" improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014"""
""" English-to-French translation task, our model establishes a new single-model state-of-the-art"""
""" BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training"""
""" costs of the best models from the literature. We show that the Transformer generalizes well to"""
""" other tasks by applying it successfully to English constituency parsing both with large and"""
""" limited training data."""
),
"""labels""": ["""translation""", """machine learning""", """vision""", """statistics"""],
"""scores""": [0.817, 0.713, 0.018, 0.018],
} , )
@slow
@require_tf
def snake_case__ ( self : Union[str, Any] ):
__snake_case : List[str] = pipeline("""zero-shot-classification""" , model="""roberta-large-mnli""" , framework="""tf""" )
__snake_case : Tuple = zero_shot_classifier(
"""Who are you voting for in 2020?""" , candidate_labels=["""politics""", """public health""", """science"""] )
self.assertEqual(
nested_simplify(_lowerCAmelCase ) , {
"""sequence""": """Who are you voting for in 2020?""",
"""labels""": ["""politics""", """public health""", """science"""],
"""scores""": [0.976, 0.015, 0.009],
} , )
__snake_case : int = zero_shot_classifier(
"""The dominant sequence transduction models are based on complex recurrent or convolutional neural networks"""
""" in an encoder-decoder configuration. The best performing models also connect the encoder and decoder"""
""" through an attention mechanism. We propose a new simple network architecture, the Transformer, based"""
""" solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two"""
""" machine translation tasks show these models to be superior in quality while being more parallelizable"""
""" and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014"""
""" English-to-German translation task, improving over the existing best results, including ensembles by"""
""" over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new"""
""" single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small"""
""" fraction of the training costs of the best models from the literature. We show that the Transformer"""
""" generalizes well to other tasks by applying it successfully to English constituency parsing both with"""
""" large and limited training data.""" , candidate_labels=["""machine learning""", """statistics""", """translation""", """vision"""] , multi_label=_lowerCAmelCase , )
self.assertEqual(
nested_simplify(_lowerCAmelCase ) , {
"""sequence""": (
"""The dominant sequence transduction models are based on complex recurrent or convolutional neural"""
""" networks in an encoder-decoder configuration. The best performing models also connect the"""
""" encoder and decoder through an attention mechanism. We propose a new simple network"""
""" architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence"""
""" and convolutions entirely. Experiments on two machine translation tasks show these models to be"""
""" superior in quality while being more parallelizable and requiring significantly less time to"""
""" train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,"""
""" improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014"""
""" English-to-French translation task, our model establishes a new single-model state-of-the-art"""
""" BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training"""
""" costs of the best models from the literature. We show that the Transformer generalizes well to"""
""" other tasks by applying it successfully to English constituency parsing both with large and"""
""" limited training data."""
),
"""labels""": ["""translation""", """machine learning""", """vision""", """statistics"""],
"""scores""": [0.817, 0.713, 0.018, 0.018],
} , )
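# Editor's sketch, not part of the dataset rows around it: the public entry
# point the test class above exercises, using the same roberta-large-mnli
# checkpoint and inputs as the slow tests.
from transformers import pipeline

classifier = pipeline("zero-shot-classification", model="roberta-large-mnli")
result = classifier(
    "Who are you voting for in 2020?",
    candidate_labels=["politics", "public health", "science"],
)
print(result["labels"][0], result["scores"][0])  # expected top label: "politics"
# multi_label=True scores each label independently (no softmax across labels):
# classifier(text, candidate_labels=[...], multi_label=True)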
| 357 | import math
def __lowerCAmelCase ( __SCREAMING_SNAKE_CASE : float , __SCREAMING_SNAKE_CASE : float ):
'''simple docstring'''
if (
not isinstance(__SCREAMING_SNAKE_CASE , (int, float) )
or power_factor < -1
or power_factor > 1
):
raise ValueError("""power_factor must be a valid float value between -1 and 1.""" )
return apparent_power * power_factor
def __lowerCAmelCase ( __SCREAMING_SNAKE_CASE : float , __SCREAMING_SNAKE_CASE : float ):
'''simple docstring'''
if (
not isinstance(__SCREAMING_SNAKE_CASE , (int, float) )
or power_factor < -1
or power_factor > 1
):
raise ValueError("""power_factor must be a valid float value between -1 and 1.""" )
return apparent_power * math.sqrt(1 - power_factor**2 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 20 | 0 |
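# Editor's worked example, not part of the dataset rows around it, for the two
# AC-power helpers above. With apparent power 100 VA at power factor 0.8:
#   real power     = 100 * 0.8               = 80.0 W
#   reactive power = 100 * sqrt(1 - 0.8**2)  = 100 * 0.6 = 60.0 VAR
# and sqrt(80**2 + 60**2) == 100 recovers the apparent power, i.e. the two
# outputs are the legs of the power triangle.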
from ...configuration_utils import PretrainedConfig
lowercase_ = {
"google/tapas-base-finetuned-sqa": (
"https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json"
),
"google/tapas-base-finetuned-wtq": (
"https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json"
),
"google/tapas-base-finetuned-wikisql-supervised": (
"https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json"
),
"google/tapas-base-finetuned-tabfact": (
"https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json"
),
}
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase ):
A : Optional[Any] = "tapas"
def __init__( self : Optional[int] , _lowerCAmelCase : List[str]=3_05_22 , _lowerCAmelCase : Tuple=7_68 , _lowerCAmelCase : int=12 , _lowerCAmelCase : List[str]=12 , _lowerCAmelCase : Optional[Any]=30_72 , _lowerCAmelCase : List[Any]="gelu" , _lowerCAmelCase : Dict=0.1 , _lowerCAmelCase : Tuple=0.1 , _lowerCAmelCase : List[Any]=10_24 , _lowerCAmelCase : Dict=[3, 2_56, 2_56, 2, 2_56, 2_56, 10] , _lowerCAmelCase : int=0.02 , _lowerCAmelCase : List[Any]=1e-12 , _lowerCAmelCase : Dict=0 , _lowerCAmelCase : Dict=10.0 , _lowerCAmelCase : Any=0 , _lowerCAmelCase : int=1.0 , _lowerCAmelCase : int=None , _lowerCAmelCase : int=1.0 , _lowerCAmelCase : List[Any]=False , _lowerCAmelCase : Tuple=None , _lowerCAmelCase : Optional[int]=1.0 , _lowerCAmelCase : List[str]=1.0 , _lowerCAmelCase : Any=False , _lowerCAmelCase : List[Any]=False , _lowerCAmelCase : Union[str, Any]="ratio" , _lowerCAmelCase : List[Any]=None , _lowerCAmelCase : int=None , _lowerCAmelCase : Dict=64 , _lowerCAmelCase : str=32 , _lowerCAmelCase : str=False , _lowerCAmelCase : List[Any]=True , _lowerCAmelCase : Optional[int]=False , _lowerCAmelCase : List[Any]=False , _lowerCAmelCase : List[Any]=True , _lowerCAmelCase : Any=False , _lowerCAmelCase : Tuple=None , _lowerCAmelCase : Tuple=None , **_lowerCAmelCase : Union[str, Any] , ):
super().__init__(pad_token_id=_lowerCAmelCase , **_lowerCAmelCase )
# BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes)
__snake_case : List[str] = vocab_size
__snake_case : Dict = hidden_size
__snake_case : List[str] = num_hidden_layers
__snake_case : List[Any] = num_attention_heads
__snake_case : List[str] = hidden_act
__snake_case : Any = intermediate_size
__snake_case : Union[str, Any] = hidden_dropout_prob
__snake_case : str = attention_probs_dropout_prob
__snake_case : Tuple = max_position_embeddings
__snake_case : List[str] = type_vocab_sizes
__snake_case : Dict = initializer_range
__snake_case : Dict = layer_norm_eps
# Fine-tuning task hyperparameters
__snake_case : int = positive_label_weight
__snake_case : str = num_aggregation_labels
__snake_case : Dict = aggregation_loss_weight
__snake_case : Dict = use_answer_as_supervision
__snake_case : Dict = answer_loss_importance
__snake_case : Union[str, Any] = use_normalized_answer_loss
__snake_case : Optional[Any] = huber_loss_delta
__snake_case : List[Any] = temperature
__snake_case : int = aggregation_temperature
__snake_case : Any = use_gumbel_for_cells
__snake_case : str = use_gumbel_for_aggregation
__snake_case : Any = average_approximation_function
__snake_case : int = cell_selection_preference
__snake_case : Dict = answer_loss_cutoff
__snake_case : Any = max_num_rows
__snake_case : List[Any] = max_num_columns
__snake_case : Tuple = average_logits_per_cell
__snake_case : int = select_one_column
__snake_case : Dict = allow_empty_column_selection
__snake_case : Tuple = init_cell_selection_weights_to_zero
__snake_case : Dict = reset_position_index_per_cell
__snake_case : Union[str, Any] = disable_per_token_loss
# Aggregation hyperparameters
__snake_case : Any = aggregation_labels
__snake_case : Dict = no_aggregation_label_index
if isinstance(self.aggregation_labels , _lowerCAmelCase ):
__snake_case : List[Any] = {int(_lowerCAmelCase ): v for k, v in aggregation_labels.items()}
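# Editor's sketch, not part of the dataset rows around it: the TAPAS config
# above in use -- load a fine-tuned preset, or set task fields by hand.
from transformers import TapasConfig

wtq_config = TapasConfig.from_pretrained("google/tapas-base-finetuned-wtq")
print(wtq_config.num_aggregation_labels, wtq_config.aggregation_labels)
custom_config = TapasConfig(num_aggregation_labels=4, use_answer_as_supervision=True)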
| 358 | from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
FPaRecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
get_ccl_version,
    is_abit_bnb_available,
is_aim_available,
is_bfaa_available,
is_bnb_available,
is_botoa_available,
is_ccl_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_fpa_available,
is_ipex_available,
is_megatron_lm_available,
is_mlflow_available,
is_mps_available,
is_npu_available,
is_rich_available,
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
convert_outputs_to_fpaa,
convert_to_fpaa,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
if is_deepspeed_available():
from .deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
HfDeepSpeedConfig,
)
from .bnb import has_abit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
prepare_sagemager_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
TaTrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers
| 20 | 0 |
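# Editor's sketch, not part of the dataset rows around it, for one of the many
# helpers re-exported above: find_executable_batch_size retries the decorated
# function with a halved batch size whenever it hits a CUDA out-of-memory error.
from accelerate.utils import find_executable_batch_size

@find_executable_batch_size(starting_batch_size=128)
def training_loop(batch_size):
    ...  # build dataloaders and train with `batch_size`; on OOM, retried at 64, 32, ...

training_loop()  # called with no arguments; the decorator injects batch_size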
import webbrowser
from sys import argv
from urllib.parse import parse_qs, quote
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
lowercase_ = "%20".join(argv[1:]) if len(argv) > 1 else quote(str(input("Search: ")))
print("Googling.....")
lowercase_ = F'''https://www.google.com/search?q={query}&num=100'''
lowercase_ = requests.get(
url,
headers={"User-Agent": str(UserAgent().random)},
)
try:
lowercase_ = (
BeautifulSoup(res.text, "html.parser")
.find("div", attrs={"class": "yuRUbf"})
.find("a")
.get("href")
)
except AttributeError:
lowercase_ = parse_qs(
BeautifulSoup(res.text, "html.parser")
.find("div", attrs={"class": "kCrYT"})
.find("a")
.get("href")
)["url"][0]
webbrowser.open(link)
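# Editor's note, not part of the dataset rows around it: the CSS class names
# hard-coded above ("yuRUbf", "kCrYT") are Google-internal and change without
# notice, which is why the script carries a fallback branch. That fallback
# leans on parse_qs, which turns a query string into a dict of value lists:
from urllib.parse import parse_qs

print(parse_qs("q=python&url=https://example.com")["url"][0])  # https://example.com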
| 359 | from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowercase_ = {
"configuration_blenderbot_small": [
"BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP",
"BlenderbotSmallConfig",
"BlenderbotSmallOnnxConfig",
],
"tokenization_blenderbot_small": ["BlenderbotSmallTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ["BlenderbotSmallTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
"BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST",
"BlenderbotSmallForCausalLM",
"BlenderbotSmallForConditionalGeneration",
"BlenderbotSmallModel",
"BlenderbotSmallPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
"TFBlenderbotSmallForConditionalGeneration",
"TFBlenderbotSmallModel",
"TFBlenderbotSmallPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
"FlaxBlenderbotSmallForConditionalGeneration",
"FlaxBlenderbotSmallModel",
"FlaxBlenderbotSmallPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotSmallConfig,
BlenderbotSmallOnnxConfig,
)
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_small_fast import BlenderbotSmallTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotSmallForCausalLM,
BlenderbotSmallForConditionalGeneration,
BlenderbotSmallModel,
BlenderbotSmallPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot_small import (
TFBlenderbotSmallForConditionalGeneration,
TFBlenderbotSmallModel,
TFBlenderbotSmallPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot_small import (
FlaxBlenderbotSmallForConditionalGeneration,
FlaxBlenderbotSmallModel,
FlaxBlenderbotSmallPreTrainedModel,
)
else:
import sys
lowercase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 20 | 0 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_lxmert import LxmertTokenizer
lowercase_ = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
lowercase_ = {
"vocab_file": {
"unc-nlp/lxmert-base-uncased": "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/vocab.txt",
},
"tokenizer_file": {
"unc-nlp/lxmert-base-uncased": (
"https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/tokenizer.json"
),
},
}
lowercase_ = {
"unc-nlp/lxmert-base-uncased": 5_12,
}
lowercase_ = {
"unc-nlp/lxmert-base-uncased": {"do_lower_case": True},
}
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase ):
A : List[Any] = VOCAB_FILES_NAMES
A : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
A : Optional[int] = PRETRAINED_INIT_CONFIGURATION
A : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A : List[str] = LxmertTokenizer
def __init__( self : int , _lowerCAmelCase : List[Any]=None , _lowerCAmelCase : List[Any]=None , _lowerCAmelCase : int=True , _lowerCAmelCase : Optional[int]="[UNK]" , _lowerCAmelCase : str="[SEP]" , _lowerCAmelCase : List[str]="[PAD]" , _lowerCAmelCase : Optional[Any]="[CLS]" , _lowerCAmelCase : List[str]="[MASK]" , _lowerCAmelCase : Optional[Any]=True , _lowerCAmelCase : Any=None , **_lowerCAmelCase : Optional[int] , ):
super().__init__(
_lowerCAmelCase , tokenizer_file=_lowerCAmelCase , do_lower_case=_lowerCAmelCase , unk_token=_lowerCAmelCase , sep_token=_lowerCAmelCase , pad_token=_lowerCAmelCase , cls_token=_lowerCAmelCase , mask_token=_lowerCAmelCase , tokenize_chinese_chars=_lowerCAmelCase , strip_accents=_lowerCAmelCase , **_lowerCAmelCase , )
__snake_case : Tuple = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("""lowercase""" , _lowerCAmelCase ) != do_lower_case
or normalizer_state.get("""strip_accents""" , _lowerCAmelCase ) != strip_accents
or normalizer_state.get("""handle_chinese_chars""" , _lowerCAmelCase ) != tokenize_chinese_chars
):
__snake_case : Union[str, Any] = getattr(_lowerCAmelCase , normalizer_state.pop("""type""" ) )
__snake_case : List[Any] = do_lower_case
__snake_case : Optional[int] = strip_accents
__snake_case : List[str] = tokenize_chinese_chars
__snake_case : Optional[int] = normalizer_class(**_lowerCAmelCase )
__snake_case : Optional[Any] = do_lower_case
def snake_case__ ( self : Optional[Any] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Any=None ):
__snake_case : int = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def snake_case__ ( self : Optional[int] , _lowerCAmelCase : List[int] , _lowerCAmelCase : Optional[List[int]] = None ):
__snake_case : Union[str, Any] = [self.sep_token_id]
__snake_case : List[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def snake_case__ ( self : Any , _lowerCAmelCase : str , _lowerCAmelCase : Optional[str] = None ):
__snake_case : List[Any] = self._tokenizer.model.save(_lowerCAmelCase , name=_lowerCAmelCase )
return tuple(_lowerCAmelCase )
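# Editor's sketch, not part of the dataset rows around it: the fast tokenizer
# above in use. Sentence pairs become [CLS] a [SEP] b [SEP] with 0/1 segment
# ids, exactly as the two helper methods above encode them.
from transformers import LxmertTokenizerFast

tok = LxmertTokenizerFast.from_pretrained("unc-nlp/lxmert-base-uncased")
enc = tok("who is eating?", "a man on a bench")
print(tok.convert_ids_to_tokens(enc["input_ids"]))
print(enc["token_type_ids"])  # 0s over "[CLS] a [SEP]", 1s over "b [SEP]"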
| 360 | from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
is_vision_available,
)
lowercase_ = {"configuration_vit": ["VIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTConfig", "ViTOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ["ViTFeatureExtractor"]
lowercase_ = ["ViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
"VIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"ViTForImageClassification",
"ViTForMaskedImageModeling",
"ViTModel",
"ViTPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
"TFViTForImageClassification",
"TFViTModel",
"TFViTPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
"FlaxViTForImageClassification",
"FlaxViTModel",
"FlaxViTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_vit import ViTFeatureExtractor
from .image_processing_vit import ViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit import (
VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTForImageClassification,
ViTForMaskedImageModeling,
ViTModel,
ViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel
else:
import sys
lowercase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 20 | 0 |
"""simple docstring"""
def __lowerCAmelCase ( __SCREAMING_SNAKE_CASE : int = 1_0_0_0 ):
'''simple docstring'''
    __snake_case , __snake_case : Optional[int] = 1, 1
__snake_case : str = []
for i in range(1 , n + 1 ):
__snake_case : Dict = prev_numerator + 2 * prev_denominator
__snake_case : Dict = prev_numerator + prev_denominator
if len(str(__SCREAMING_SNAKE_CASE ) ) > len(str(__SCREAMING_SNAKE_CASE ) ):
result.append(__SCREAMING_SNAKE_CASE )
__snake_case : Union[str, Any] = numerator
__snake_case : List[str] = denominator
return len(__SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
print(F'''{solution() = }''')
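# Editor's worked trace, not part of the dataset rows around it, of the
# recurrence above (numerator' = n + 2d, denominator' = n + d, starting 1/1):
#   3/2, 7/5, 17/12, 41/29, 99/70, 239/169, 577/408, 1393/985, ...
# 1393/985 (iteration 8) is the first expansion where the numerator has more
# digits than the denominator, so it is the first one the function counts.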
| 361 | from __future__ import annotations
def __lowerCAmelCase ( __SCREAMING_SNAKE_CASE : list[int] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
if (direction == 1 and array[indexa] > array[indexa]) or (
direction == 0 and array[indexa] < array[indexa]
):
__snake_case , __snake_case : str = array[indexa], array[indexa]
def __lowerCAmelCase ( __SCREAMING_SNAKE_CASE : list[int] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
if length > 1:
__snake_case : Tuple = int(length / 2 )
for i in range(__SCREAMING_SNAKE_CASE , low + middle ):
comp_and_swap(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , i + middle , __SCREAMING_SNAKE_CASE )
bitonic_merge(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
bitonic_merge(__SCREAMING_SNAKE_CASE , low + middle , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( __SCREAMING_SNAKE_CASE : list[int] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
if length > 1:
__snake_case : Optional[Any] = int(length / 2 )
bitonic_sort(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , 1 )
bitonic_sort(__SCREAMING_SNAKE_CASE , low + middle , __SCREAMING_SNAKE_CASE , 0 )
bitonic_merge(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
lowercase_ = input("Enter numbers separated by a comma:\n").strip()
lowercase_ = [int(item.strip()) for item in user_input.split(",")]
bitonic_sort(unsorted, 0, len(unsorted), 1)
print("\nSorted array in ascending order is: ", end="")
print(*unsorted, sep=", ")
bitonic_merge(unsorted, 0, len(unsorted), 0)
print("Sorted array in descending order is: ", end="")
print(*unsorted, sep=", ")
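# A quick sanity check for the routines above (using the original function names,
# as the __main__ block does). Note that bitonic sort only works when the input
# length is a power of two:
def _bitonic_demo() -> None:
    data = [12, 42, -21, 17, 23, 18, 9, -5]
    bitonic_sort(data, 0, len(data), 1)   # direction 1 -> ascending
    assert data == sorted(data)
    bitonic_merge(data, 0, len(data), 0)  # direction 0 -> descending merge of a bitonic run
    assert data == sorted(data, reverse=True)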
| 20 | 0 |
"""simple docstring"""
from __future__ import annotations
import math
def __lowerCAmelCase ( __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : bool , __SCREAMING_SNAKE_CASE : list[int] , __SCREAMING_SNAKE_CASE : float ):
'''simple docstring'''
if depth < 0:
raise ValueError("""Depth cannot be less than 0""" )
if len(__SCREAMING_SNAKE_CASE ) == 0:
raise ValueError("""Scores cannot be empty""" )
if depth == height:
return scores[node_index]
if is_max:
return max(
minimax(depth + 1 , node_index * 2 , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) , minimax(depth + 1 , node_index * 2 + 1 , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) , )
return min(
minimax(depth + 1 , node_index * 2 , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) , minimax(depth + 1 , node_index * 2 + 1 , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) , )
def __lowerCAmelCase ( ):
'''simple docstring'''
__snake_case : str = [9_0, 2_3, 6, 3_3, 2_1, 6_5, 1_2_3, 3_4_4_2_3]
__snake_case : Optional[Any] = math.log(len(__SCREAMING_SNAKE_CASE ) , 2 )
print("""Optimal value : """ , end="""""" )
print(minimax(0 , 0 , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
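# A compact de-obfuscated sketch of the evaluation above (names reconstructed):
# `scores` are the leaves of a complete binary tree of the given `height`, and
# the maximizing and minimizing player alternate by depth.
def minimax_sketch(depth: int, node_index: int, is_max: bool, scores: list, height: float) -> int:
    if depth == height:
        return scores[node_index]
    left = minimax_sketch(depth + 1, node_index * 2, not is_max, scores, height)
    right = minimax_sketch(depth + 1, node_index * 2 + 1, not is_max, scores, height)
    return max(left, right) if is_max else min(left, right)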
| 362 | import os
import zipfile
import pytest
from datasets.utils.extract import (
BzipaExtractor,
Extractor,
GzipExtractor,
LzaExtractor,
SevenZipExtractor,
TarExtractor,
XzExtractor,
ZipExtractor,
ZstdExtractor,
)
from .utils import require_lza, require_pyazr, require_zstandard
@pytest.mark.parametrize(
"""compression_format, is_archive""" , [
("""7z""", True),
("""bz2""", False),
("""gzip""", False),
("""lz4""", False),
("""tar""", True),
("""xz""", False),
("""zip""", True),
("""zstd""", False),
] , )
def __lowerCAmelCase ( __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Optional[Any] , ):
'''simple docstring'''
__snake_case : Optional[int] = {
"""7z""": (seven_zip_file, SevenZipExtractor),
"""bz2""": (bza_file, BzipaExtractor),
"""gzip""": (gz_file, GzipExtractor),
"""lz4""": (lza_file, LzaExtractor),
"""tar""": (tar_file, TarExtractor),
"""xz""": (xz_file, XzExtractor),
"""zip""": (zip_file, ZipExtractor),
"""zstd""": (zstd_file, ZstdExtractor),
}
__snake_case , __snake_case : Tuple = input_paths_and_base_extractors[compression_format]
if input_path is None:
__snake_case : Tuple = F'''for \'{compression_format}\' compression_format, '''
if compression_format == "7z":
reason += require_pyazr.kwargs["reason"]
elif compression_format == "lz4":
reason += require_lza.kwargs["reason"]
elif compression_format == "zstd":
reason += require_zstandard.kwargs["reason"]
pytest.skip(__SCREAMING_SNAKE_CASE )
assert base_extractor.is_extractable(__SCREAMING_SNAKE_CASE )
__snake_case : List[str] = tmp_path / ("""extracted""" if is_archive else """extracted.txt""")
base_extractor.extract(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
if is_archive:
assert output_path.is_dir()
for file_path in output_path.iterdir():
assert file_path.name == text_file.name
__snake_case : List[str] = file_path.read_text(encoding="""utf-8""" )
else:
__snake_case : Optional[Any] = output_path.read_text(encoding="""utf-8""" )
__snake_case : int = text_file.read_text(encoding="""utf-8""" )
assert extracted_file_content == expected_file_content
@pytest.mark.parametrize(
"""compression_format, is_archive""" , [
("""7z""", True),
("""bz2""", False),
("""gzip""", False),
("""lz4""", False),
("""tar""", True),
("""xz""", False),
("""zip""", True),
("""zstd""", False),
] , )
def __lowerCAmelCase ( __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Union[str, Any] , ):
'''simple docstring'''
__snake_case : Union[str, Any] = {
"""7z""": seven_zip_file,
"""bz2""": bza_file,
"""gzip""": gz_file,
"""lz4""": lza_file,
"""tar""": tar_file,
"""xz""": xz_file,
"""zip""": zip_file,
"""zstd""": zstd_file,
}
__snake_case : int = input_paths[compression_format]
if input_path is None:
__snake_case : int = F'''for \'{compression_format}\' compression_format, '''
if compression_format == "7z":
reason += require_pyazr.kwargs["reason"]
elif compression_format == "lz4":
reason += require_lza.kwargs["reason"]
elif compression_format == "zstd":
reason += require_zstandard.kwargs["reason"]
pytest.skip(__SCREAMING_SNAKE_CASE )
__snake_case : Any = Extractor.infer_extractor_format(__SCREAMING_SNAKE_CASE )
assert extractor_format is not None
__snake_case : Tuple = tmp_path / ("""extracted""" if is_archive else """extracted.txt""")
Extractor.extract(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
if is_archive:
assert output_path.is_dir()
for file_path in output_path.iterdir():
assert file_path.name == text_file.name
__snake_case : Union[str, Any] = file_path.read_text(encoding="""utf-8""" )
else:
__snake_case : Union[str, Any] = output_path.read_text(encoding="""utf-8""" )
__snake_case : Optional[Any] = text_file.read_text(encoding="""utf-8""" )
assert extracted_file_content == expected_file_content
@pytest.fixture
def __lowerCAmelCase ( __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Any ):
'''simple docstring'''
import tarfile
__snake_case : List[str] = tmp_path / """data_dot_dot"""
directory.mkdir()
__snake_case : Optional[Any] = directory / """tar_file_with_dot_dot.tar"""
with tarfile.TarFile(__SCREAMING_SNAKE_CASE , """w""" ) as f:
f.add(__SCREAMING_SNAKE_CASE , arcname=os.path.join("""..""" , text_file.name ) )
return path
@pytest.fixture
def __lowerCAmelCase ( __SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
import tarfile
__snake_case : Dict = tmp_path / """data_sym_link"""
directory.mkdir()
__snake_case : Tuple = directory / """tar_file_with_sym_link.tar"""
os.symlink("""..""" , directory / """subdir""" , target_is_directory=__SCREAMING_SNAKE_CASE )
with tarfile.TarFile(__SCREAMING_SNAKE_CASE , """w""" ) as f:
f.add(str(directory / """subdir""" ) , arcname="""subdir""" ) # str required by os.readlink on Windows and Python < 3.8
return path
@pytest.mark.parametrize(
"""insecure_tar_file, error_log""" , [("""tar_file_with_dot_dot""", """illegal path"""), ("""tar_file_with_sym_link""", """Symlink""")] , )
def __lowerCAmelCase ( __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Optional[Any] ):
'''simple docstring'''
__snake_case : Any = {
"""tar_file_with_dot_dot""": tar_file_with_dot_dot,
"""tar_file_with_sym_link""": tar_file_with_sym_link,
}
__snake_case : int = insecure_tar_files[insecure_tar_file]
__snake_case : Optional[int] = tmp_path / """extracted"""
TarExtractor.extract(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
assert caplog.text
for record in caplog.records:
assert record.levelname == "ERROR"
assert error_log in record.msg
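# The two tests above expect the extractor to refuse unsafe tar members. A minimal
# sketch of such a check (illustrative only; the real logic lives in
# `datasets.utils.extract` and may differ):
import os
import tarfile

def _unsafe_member_reason(member: tarfile.TarInfo, dest_dir: str):
    if member.issym() or member.islnk():
        return "Symlink"  # links can point outside the extraction directory
    target = os.path.realpath(os.path.join(dest_dir, member.name))
    if not target.startswith(os.path.realpath(dest_dir) + os.sep):
        return "illegal path"  # e.g. member names containing ".."
    return None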
def __lowerCAmelCase ( __SCREAMING_SNAKE_CASE : Dict ):
'''simple docstring'''
    # We should have fewer false positives than zipfile.is_zipfile
# We do that by checking only the magic number
__snake_case : Optional[Any] = tmpdir / """not_a_zip_file"""
# From: https://github.com/python/cpython/pull/5053
__snake_case : List[str] = (
b"""\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x01\x00\x00"""
b"""\x00\x02\x08\x06\x00\x00\x00\x99\x81\xb6'\x00\x00\x00\x15I"""
b"""DATx\x01\x01\n\x00\xf5\xff\x00PK\x05\x06\x00PK\x06\x06\x07"""
b"""\xac\x01N\xc6|a\r\x00\x00\x00\x00IEND\xaeB`\x82"""
)
with not_a_zip_file.open("""wb""" ) as f:
f.write(__SCREAMING_SNAKE_CASE )
assert zipfile.is_zipfile(str(__SCREAMING_SNAKE_CASE ) ) # is a false positive for `zipfile`
assert not ZipExtractor.is_extractable(__SCREAMING_SNAKE_CASE ) # but we're right
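# The test above works because `zipfile.is_zipfile` searches the whole file for an
# end-of-central-directory record, so the embedded "PK\x05\x06" bytes in the PNG
# payload trigger a false positive. A sketch of a magic-number-only check
# (illustrative, not the actual ZipExtractor code):
def _starts_with_zip_magic(path) -> bool:
    with open(path, "rb") as f:
        magic = f.read(4)
    # local file header, empty archive, and spanned archive signatures
    return magic in (b"PK\x03\x04", b"PK\x05\x06", b"PK\x07\x08")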
| 20 | 0 |
from typing import Optional, Tuple
import jax
import jax.numpy as jnp
from flax import linen as nn
from flax.core.frozen_dict import FrozenDict
from transformers import CLIPConfig, FlaxPreTrainedModel
from transformers.models.clip.modeling_flax_clip import FlaxCLIPVisionModule
def __lowerCAmelCase ( __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Dict=1E-12 ):
'''simple docstring'''
    __snake_case : Union[str, Any] = jnp.divide(emb_a.T , jnp.clip(jnp.linalg.norm(emb_a , axis=1 ) , a_min=__SCREAMING_SNAKE_CASE ) ).T
    __snake_case : Union[str, Any] = jnp.divide(emb_b.T , jnp.clip(jnp.linalg.norm(emb_b , axis=1 ) , a_min=__SCREAMING_SNAKE_CASE ) ).T
    return jnp.matmul(norm_emb_a , norm_emb_b.T )
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
A : CLIPConfig
A : jnp.dtype = jnp.floataa
def lowerCAmelCase__ ( self : str ):
__snake_case : List[Any] = FlaxCLIPVisionModule(self.config.vision_config )
__snake_case : Union[str, Any] = nn.Dense(self.config.projection_dim , use_bias=_lowerCAmelCase , dtype=self.dtype )
__snake_case : Optional[Any] = self.param("""concept_embeds""" , jax.nn.initializers.ones , (17, self.config.projection_dim) )
__snake_case : Optional[int] = self.param(
"""special_care_embeds""" , jax.nn.initializers.ones , (3, self.config.projection_dim) )
__snake_case : Union[str, Any] = self.param("""concept_embeds_weights""" , jax.nn.initializers.ones , (17,) )
__snake_case : List[str] = self.param("""special_care_embeds_weights""" , jax.nn.initializers.ones , (3,) )
def __call__( self : Optional[Any] , _lowerCAmelCase : Union[str, Any] ):
__snake_case : Tuple = self.vision_model(_lowerCAmelCase )[1]
__snake_case : int = self.visual_projection(_lowerCAmelCase )
__snake_case : List[Any] = jax_cosine_distance(_lowerCAmelCase , self.special_care_embeds )
__snake_case : Optional[int] = jax_cosine_distance(_lowerCAmelCase , self.concept_embeds )
        # increase this value to create a stronger `nsfw` filter
        # at the cost of increasing the possibility of filtering benign image inputs
__snake_case : List[str] = 0.0
__snake_case : List[str] = special_cos_dist - self.special_care_embeds_weights[None, :] + adjustment
__snake_case : Tuple = jnp.round(_lowerCAmelCase , 3 )
__snake_case : Any = jnp.any(special_scores > 0 , axis=1 , keepdims=_lowerCAmelCase )
# Use a lower threshold if an image has any special care concept
__snake_case : str = is_special_care * 0.01
__snake_case : str = cos_dist - self.concept_embeds_weights[None, :] + special_adjustment
__snake_case : Any = jnp.round(_lowerCAmelCase , 3 )
__snake_case : Optional[int] = jnp.any(concept_scores > 0 , axis=1 )
return has_nsfw_concepts
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase ):
A : Union[str, Any] = CLIPConfig
A : List[str] = "clip_input"
A : str = FlaxStableDiffusionSafetyCheckerModule
def __init__( self : Tuple , _lowerCAmelCase : CLIPConfig , _lowerCAmelCase : Optional[Tuple] = None , _lowerCAmelCase : int = 0 , _lowerCAmelCase : jnp.dtype = jnp.floataa , _lowerCAmelCase : bool = True , **_lowerCAmelCase : List[Any] , ):
if input_shape is None:
__snake_case : List[str] = (1, 2_24, 2_24, 3)
__snake_case : Dict = self.module_class(config=_lowerCAmelCase , dtype=_lowerCAmelCase , **_lowerCAmelCase )
super().__init__(_lowerCAmelCase , _lowerCAmelCase , input_shape=_lowerCAmelCase , seed=_lowerCAmelCase , dtype=_lowerCAmelCase , _do_init=_do_init )
def lowerCAmelCase__ ( self : List[Any] , _lowerCAmelCase : jax.random.KeyArray , _lowerCAmelCase : Tuple , _lowerCAmelCase : FrozenDict = None ):
# init input tensor
__snake_case : int = jax.random.normal(_lowerCAmelCase , _lowerCAmelCase )
__snake_case : Dict = jax.random.split(_lowerCAmelCase )
__snake_case : Dict = {"""params""": params_rng, """dropout""": dropout_rng}
__snake_case : Union[str, Any] = self.module.init(_lowerCAmelCase , _lowerCAmelCase )["""params"""]
return random_params
def __call__( self : List[str] , _lowerCAmelCase : str , _lowerCAmelCase : dict = None , ):
__snake_case : Optional[int] = jnp.transpose(_lowerCAmelCase , (0, 2, 3, 1) )
return self.module.apply(
{"""params""": params or self.params} , jnp.array(_lowerCAmelCase , dtype=jnp.floataa ) , rngs={} , )
| 363 | import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
@require_pyctcdecode
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def snake_case__ ( self : Any ):
__snake_case : Dict = """| <pad> <unk> <s> </s> a b c d e f g h i j k""".split()
__snake_case : str = dict(zip(_lowerCAmelCase , range(len(_lowerCAmelCase ) ) ) )
__snake_case : List[str] = {
"""unk_token""": """<unk>""",
"""bos_token""": """<s>""",
"""eos_token""": """</s>""",
}
__snake_case : str = {
"""feature_size""": 1,
"""padding_value""": 0.0,
"""sampling_rate""": 1_60_00,
"""return_attention_mask""": False,
"""do_normalize""": True,
}
__snake_case : Optional[Any] = tempfile.mkdtemp()
__snake_case : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
__snake_case : Any = os.path.join(self.tmpdirname , _lowerCAmelCase )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(_lowerCAmelCase ) + """\n""" )
with open(self.feature_extraction_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(_lowerCAmelCase ) + """\n""" )
# load decoder from hub
__snake_case : Optional[int] = """hf-internal-testing/ngram-beam-search-decoder"""
def snake_case__ ( self : Optional[Any] , **_lowerCAmelCase : Tuple ):
__snake_case : int = self.add_kwargs_tokens_map.copy()
kwargs.update(_lowerCAmelCase )
return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname , **_lowerCAmelCase )
def snake_case__ ( self : Union[str, Any] , **_lowerCAmelCase : Optional[int] ):
return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname , **_lowerCAmelCase )
def snake_case__ ( self : Dict , **_lowerCAmelCase : Tuple ):
return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name , **_lowerCAmelCase )
def snake_case__ ( self : List[str] ):
shutil.rmtree(self.tmpdirname )
def snake_case__ ( self : Union[str, Any] ):
__snake_case : Union[str, Any] = self.get_tokenizer()
__snake_case : Tuple = self.get_feature_extractor()
__snake_case : Dict = self.get_decoder()
__snake_case : List[str] = WavaVecaProcessorWithLM(tokenizer=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , decoder=_lowerCAmelCase )
processor.save_pretrained(self.tmpdirname )
__snake_case : Dict = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname )
# tokenizer
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , _lowerCAmelCase )
# feature extractor
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , _lowerCAmelCase )
# decoder
self.assertEqual(processor.decoder._alphabet.labels , decoder._alphabet.labels )
self.assertEqual(
processor.decoder.model_container[decoder._model_key]._unigram_set , decoder.model_container[decoder._model_key]._unigram_set , )
self.assertIsInstance(processor.decoder , _lowerCAmelCase )
def snake_case__ ( self : Tuple ):
__snake_case : Tuple = WavaVecaProcessorWithLM(
tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
processor.save_pretrained(self.tmpdirname )
        # make sure the additional decoder kwargs passed to from_pretrained are applied
__snake_case : Optional[Any] = WavaVecaProcessorWithLM.from_pretrained(
self.tmpdirname , alpha=5.0 , beta=3.0 , score_boundary=-7.0 , unk_score_offset=3 )
# decoder
self.assertEqual(processor.language_model.alpha , 5.0 )
self.assertEqual(processor.language_model.beta , 3.0 )
self.assertEqual(processor.language_model.score_boundary , -7.0 )
self.assertEqual(processor.language_model.unk_score_offset , 3 )
def snake_case__ ( self : int ):
__snake_case : Tuple = self.get_tokenizer()
# add token to trigger raise
tokenizer.add_tokens(["""xx"""] )
with self.assertRaisesRegex(_lowerCAmelCase , """include""" ):
WavaVecaProcessorWithLM(
tokenizer=_lowerCAmelCase , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
def snake_case__ ( self : Dict ):
__snake_case : int = self.get_feature_extractor()
__snake_case : str = self.get_tokenizer()
__snake_case : Dict = self.get_decoder()
__snake_case : Any = WavaVecaProcessorWithLM(tokenizer=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , decoder=_lowerCAmelCase )
__snake_case : List[Any] = floats_list((3, 10_00) )
__snake_case : Optional[Any] = feature_extractor(_lowerCAmelCase , return_tensors="""np""" )
__snake_case : Tuple = processor(_lowerCAmelCase , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def snake_case__ ( self : Optional[int] ):
__snake_case : Any = self.get_feature_extractor()
__snake_case : Union[str, Any] = self.get_tokenizer()
__snake_case : int = self.get_decoder()
__snake_case : Optional[Any] = WavaVecaProcessorWithLM(tokenizer=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , decoder=_lowerCAmelCase )
__snake_case : Optional[int] = """This is a test string"""
__snake_case : Union[str, Any] = processor(text=_lowerCAmelCase )
__snake_case : Dict = tokenizer(_lowerCAmelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def snake_case__ ( self : List[str] , _lowerCAmelCase : List[Any]=(2, 10, 16) , _lowerCAmelCase : str=77 ):
np.random.seed(_lowerCAmelCase )
return np.random.rand(*_lowerCAmelCase )
def snake_case__ ( self : Tuple ):
__snake_case : List[str] = self.get_feature_extractor()
__snake_case : List[str] = self.get_tokenizer()
__snake_case : List[str] = self.get_decoder()
__snake_case : Optional[Any] = WavaVecaProcessorWithLM(tokenizer=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , decoder=_lowerCAmelCase )
__snake_case : Optional[int] = self._get_dummy_logits(shape=(10, 16) , seed=13 )
__snake_case : int = processor.decode(_lowerCAmelCase )
__snake_case : Optional[int] = decoder.decode_beams(_lowerCAmelCase )[0]
self.assertEqual(decoded_decoder[0] , decoded_processor.text )
self.assertEqual("""</s> <s> </s>""" , decoded_processor.text )
self.assertEqual(decoded_decoder[-2] , decoded_processor.logit_score )
self.assertEqual(decoded_decoder[-1] , decoded_processor.lm_score )
@parameterized.expand([[None], ["""fork"""], ["""spawn"""]] )
def snake_case__ ( self : List[str] , _lowerCAmelCase : List[str] ):
__snake_case : int = self.get_feature_extractor()
__snake_case : Union[str, Any] = self.get_tokenizer()
__snake_case : int = self.get_decoder()
__snake_case : Dict = WavaVecaProcessorWithLM(tokenizer=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , decoder=_lowerCAmelCase )
__snake_case : int = self._get_dummy_logits()
# note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
# otherwise, the LM won't be available to the pool's sub-processes.
# manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
if pool_context is None:
__snake_case : Tuple = processor.batch_decode(_lowerCAmelCase )
else:
with get_context(_lowerCAmelCase ).Pool() as pool:
__snake_case : int = processor.batch_decode(_lowerCAmelCase , _lowerCAmelCase )
__snake_case : int = list(_lowerCAmelCase )
with get_context("""fork""" ).Pool() as p:
__snake_case : Tuple = decoder.decode_beams_batch(_lowerCAmelCase , _lowerCAmelCase )
__snake_case , __snake_case , __snake_case : List[Any] = [], [], []
for beams in decoded_beams:
texts_decoder.append(beams[0][0] )
logit_scores_decoder.append(beams[0][-2] )
lm_scores_decoder.append(beams[0][-1] )
self.assertListEqual(_lowerCAmelCase , decoded_processor.text )
self.assertListEqual(["""<s> <s> </s>""", """<s> <s> <s>"""] , decoded_processor.text )
self.assertListEqual(_lowerCAmelCase , decoded_processor.logit_score )
self.assertListEqual(_lowerCAmelCase , decoded_processor.lm_score )
def snake_case__ ( self : Optional[int] ):
__snake_case : Optional[Any] = self.get_feature_extractor()
__snake_case : int = self.get_tokenizer()
__snake_case : str = self.get_decoder()
__snake_case : Optional[Any] = WavaVecaProcessorWithLM(tokenizer=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , decoder=_lowerCAmelCase )
__snake_case : int = self._get_dummy_logits()
__snake_case : List[str] = 15
__snake_case : Optional[Any] = -20.0
__snake_case : Tuple = -4.0
__snake_case : List[Any] = processor.batch_decode(
_lowerCAmelCase , beam_width=_lowerCAmelCase , beam_prune_logp=_lowerCAmelCase , token_min_logp=_lowerCAmelCase , )
__snake_case : List[str] = decoded_processor_out.text
__snake_case : str = list(_lowerCAmelCase )
with get_context("""fork""" ).Pool() as pool:
__snake_case : Dict = decoder.decode_beams_batch(
_lowerCAmelCase , _lowerCAmelCase , beam_width=_lowerCAmelCase , beam_prune_logp=_lowerCAmelCase , token_min_logp=_lowerCAmelCase , )
__snake_case : int = [d[0][0] for d in decoded_decoder_out]
__snake_case : List[Any] = [d[0][2] for d in decoded_decoder_out]
__snake_case : List[Any] = [d[0][3] for d in decoded_decoder_out]
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
self.assertListEqual(["""</s> <s> <s>""", """<s> <s> <s>"""] , _lowerCAmelCase )
self.assertTrue(np.array_equal(_lowerCAmelCase , decoded_processor_out.logit_score ) )
self.assertTrue(np.allclose([-20.054, -18.447] , _lowerCAmelCase , atol=1e-3 ) )
self.assertTrue(np.array_equal(_lowerCAmelCase , decoded_processor_out.lm_score ) )
self.assertTrue(np.allclose([-15.554, -13.9474] , _lowerCAmelCase , atol=1e-3 ) )
def snake_case__ ( self : Any ):
__snake_case : List[Any] = self.get_feature_extractor()
__snake_case : Any = self.get_tokenizer()
__snake_case : Union[str, Any] = self.get_decoder()
__snake_case : Dict = WavaVecaProcessorWithLM(tokenizer=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , decoder=_lowerCAmelCase )
__snake_case : Any = self._get_dummy_logits()
__snake_case : Any = 2.0
__snake_case : int = 5.0
__snake_case : Optional[int] = -20.0
__snake_case : Optional[int] = True
__snake_case : Any = processor.batch_decode(
_lowerCAmelCase , alpha=_lowerCAmelCase , beta=_lowerCAmelCase , unk_score_offset=_lowerCAmelCase , lm_score_boundary=_lowerCAmelCase , )
__snake_case : str = decoded_processor_out.text
__snake_case : int = list(_lowerCAmelCase )
decoder.reset_params(
alpha=_lowerCAmelCase , beta=_lowerCAmelCase , unk_score_offset=_lowerCAmelCase , lm_score_boundary=_lowerCAmelCase , )
with get_context("""fork""" ).Pool() as pool:
__snake_case : Tuple = decoder.decode_beams_batch(
_lowerCAmelCase , _lowerCAmelCase , )
__snake_case : int = [d[0][0] for d in decoded_decoder_out]
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
self.assertListEqual(["""<s> </s> <s> </s> </s>""", """</s> </s> <s> </s> </s>"""] , _lowerCAmelCase )
__snake_case : List[str] = processor.decoder.model_container[processor.decoder._model_key]
self.assertEqual(lm_model.alpha , 2.0 )
self.assertEqual(lm_model.beta , 5.0 )
self.assertEqual(lm_model.unk_score_offset , -20.0 )
self.assertEqual(lm_model.score_boundary , _lowerCAmelCase )
def snake_case__ ( self : Dict ):
__snake_case : Optional[Any] = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
__snake_case : Union[str, Any] = processor.decoder.model_container[processor.decoder._model_key]
__snake_case : Optional[int] = Path(language_model._kenlm_model.path.decode("""utf-8""" ) ).parent.parent.absolute()
__snake_case : Union[str, Any] = os.listdir(_lowerCAmelCase )
__snake_case : List[str] = ["""alphabet.json""", """language_model"""]
downloaded_decoder_files.sort()
expected_decoder_files.sort()
# test that only decoder relevant files from
# https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
# are downloaded and none of the rest (e.g. README.md, ...)
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
def snake_case__ ( self : Optional[Any] ):
__snake_case : Union[str, Any] = snapshot_download("""hf-internal-testing/processor_with_lm""" )
__snake_case : Dict = WavaVecaProcessorWithLM.from_pretrained(_lowerCAmelCase )
__snake_case : Optional[Any] = processor.decoder.model_container[processor.decoder._model_key]
__snake_case : Optional[int] = Path(language_model._kenlm_model.path.decode("""utf-8""" ) ).parent.parent.absolute()
__snake_case : List[str] = os.listdir(_lowerCAmelCase )
__snake_case : List[Any] = os.listdir(_lowerCAmelCase )
local_decoder_files.sort()
expected_decoder_files.sort()
        # test that both the decoder from the hub and the local files in the cache are the same
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
def snake_case__ ( self : Optional[Any] ):
__snake_case : Optional[int] = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
__snake_case : str = AutoProcessor.from_pretrained("""hf-internal-testing/processor_with_lm""" )
__snake_case : Optional[int] = floats_list((3, 10_00) )
__snake_case : Union[str, Any] = processor_wavaveca(_lowerCAmelCase , return_tensors="""np""" )
__snake_case : Union[str, Any] = processor_auto(_lowerCAmelCase , return_tensors="""np""" )
for key in input_wavaveca.keys():
self.assertAlmostEqual(input_wavaveca[key].sum() , input_auto[key].sum() , delta=1e-2 )
__snake_case : Dict = self._get_dummy_logits()
__snake_case : List[Any] = processor_wavaveca.batch_decode(_lowerCAmelCase )
__snake_case : List[Any] = processor_auto.batch_decode(_lowerCAmelCase )
self.assertListEqual(decoded_wavaveca.text , decoded_auto.text )
def snake_case__ ( self : str ):
__snake_case : int = self.get_feature_extractor()
__snake_case : List[str] = self.get_tokenizer()
__snake_case : Optional[Any] = self.get_decoder()
__snake_case : Union[str, Any] = WavaVecaProcessorWithLM(tokenizer=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , decoder=_lowerCAmelCase )
self.assertListEqual(
processor.model_input_names , feature_extractor.model_input_names , msg="""`processor` and `feature_extractor` model input names do not match""" , )
@staticmethod
def snake_case__ ( _lowerCAmelCase : Any , _lowerCAmelCase : Tuple ):
__snake_case : Union[str, Any] = [d[key] for d in offsets]
return retrieved_list
def snake_case__ ( self : Dict ):
__snake_case : int = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
__snake_case : List[str] = self._get_dummy_logits()[0]
__snake_case : str = processor.decode(_lowerCAmelCase , output_word_offsets=_lowerCAmelCase )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue("""text""" in outputs )
self.assertTrue("""word_offsets""" in outputs )
self.assertTrue(isinstance(_lowerCAmelCase , _lowerCAmelCase ) )
self.assertEqual(""" """.join(self.get_from_offsets(outputs["""word_offsets"""] , """word""" ) ) , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""] , """word""" ) , ["""<s>""", """<s>""", """</s>"""] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""] , """start_offset""" ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""] , """end_offset""" ) , [1, 3, 5] )
def snake_case__ ( self : List[str] ):
__snake_case : Any = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
__snake_case : Optional[int] = self._get_dummy_logits()
__snake_case : int = processor.batch_decode(_lowerCAmelCase , output_word_offsets=_lowerCAmelCase )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue("""text""" in outputs )
self.assertTrue("""word_offsets""" in outputs )
self.assertTrue(isinstance(_lowerCAmelCase , _lowerCAmelCase ) )
self.assertListEqual(
[""" """.join(self.get_from_offsets(_lowerCAmelCase , """word""" ) ) for o in outputs["""word_offsets"""]] , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""][0] , """word""" ) , ["""<s>""", """<s>""", """</s>"""] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""][0] , """start_offset""" ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""][0] , """end_offset""" ) , [1, 3, 5] )
@slow
@require_torch
@require_torchaudio
def snake_case__ ( self : Optional[Any] ):
import torch
__snake_case : Optional[Any] = load_dataset("""common_voice""" , """en""" , split="""train""" , streaming=_lowerCAmelCase )
__snake_case : Any = ds.cast_column("""audio""" , datasets.Audio(sampling_rate=1_60_00 ) )
__snake_case : List[Any] = iter(_lowerCAmelCase )
__snake_case : Optional[int] = next(_lowerCAmelCase )
__snake_case : str = AutoProcessor.from_pretrained("""patrickvonplaten/wav2vec2-base-100h-with-lm""" )
__snake_case : str = WavaVecaForCTC.from_pretrained("""patrickvonplaten/wav2vec2-base-100h-with-lm""" )
# compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
__snake_case : List[str] = processor(sample["""audio"""]["""array"""] , return_tensors="""pt""" ).input_values
with torch.no_grad():
__snake_case : Dict = model(_lowerCAmelCase ).logits.cpu().numpy()
__snake_case : Any = processor.decode(logits[0] , output_word_offsets=_lowerCAmelCase )
__snake_case : Optional[Any] = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
__snake_case : Dict = [
{
"""start_time""": d["""start_offset"""] * time_offset,
"""end_time""": d["""end_offset"""] * time_offset,
"""word""": d["""word"""],
}
for d in output["""word_offsets"""]
]
__snake_case : Dict = """WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL"""
# output words
self.assertEqual(""" """.join(self.get_from_offsets(_lowerCAmelCase , """word""" ) ) , _lowerCAmelCase )
self.assertEqual(""" """.join(self.get_from_offsets(_lowerCAmelCase , """word""" ) ) , output.text )
# output times
__snake_case : Dict = torch.tensor(self.get_from_offsets(_lowerCAmelCase , """start_time""" ) )
__snake_case : Optional[Any] = torch.tensor(self.get_from_offsets(_lowerCAmelCase , """end_time""" ) )
# fmt: off
__snake_case : Optional[Any] = torch.tensor([1.4199, 1.6599, 2.2599, 3.0, 3.24, 3.5999, 3.7999, 4.0999, 4.26, 4.94, 5.28, 5.6599, 5.78, 5.94, 6.32, 6.5399, 6.6599] )
__snake_case : Optional[int] = torch.tensor([1.5399, 1.8999, 2.9, 3.16, 3.5399, 3.72, 4.0199, 4.1799, 4.76, 5.1599, 5.5599, 5.6999, 5.86, 6.1999, 6.38, 6.6199, 6.94] )
# fmt: on
self.assertTrue(torch.allclose(_lowerCAmelCase , _lowerCAmelCase , atol=0.01 ) )
self.assertTrue(torch.allclose(_lowerCAmelCase , _lowerCAmelCase , atol=0.01 ) )
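# The slow test above converts CTC frame offsets into seconds. A minimal sketch of
# that conversion (names illustrative): each logit frame spans
# `inputs_to_logits_ratio / sampling_rate` seconds of audio.
def _offsets_to_seconds(word_offsets, inputs_to_logits_ratio, sampling_rate):
    time_offset = inputs_to_logits_ratio / sampling_rate
    return [
        {
            "word": d["word"],
            "start_time": d["start_offset"] * time_offset,
            "end_time": d["end_offset"] * time_offset,
        }
        for d in word_offsets
    ]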
| 20 | 0 |
class SCREAMING_SNAKE_CASE__ :
def __init__( self : Any , _lowerCAmelCase : Dict ):
        # we need a list, not a string: split the comma-separated input into list items
__snake_case : Any = arr.split(""",""" )
def snake_case__ ( self : Union[str, Any] ):
__snake_case : Optional[Any] = [int(self.array[0] )] * len(self.array )
__snake_case : Union[str, Any] = [int(self.array[0] )] * len(self.array )
for i in range(1 , len(self.array ) ):
__snake_case : Optional[int] = max(
int(self.array[i] ) + sum_value[i - 1] , int(self.array[i] ) )
__snake_case : int = max(sum_value[i] , rear[i - 1] )
return rear[len(self.array ) - 1]
if __name__ == "__main__":
lowercase_ = input("please input some numbers:")
lowercase_ = SubArray(whole_array)
lowercase_ = array.solve_sub_array()
print(("the results is:", re))
| 364 | import argparse
import re
import torch
from CLAP import create_model
from transformers import AutoFeatureExtractor, ClapConfig, ClapModel
lowercase_ = {
"text_branch": "text_model",
"audio_branch": "audio_model.audio_encoder",
"attn": "attention.self",
"self.proj": "output.dense",
"attention.self_mask": "attn_mask",
"mlp.fc1": "intermediate.dense",
"mlp.fc2": "output.dense",
"norm1": "layernorm_before",
"norm2": "layernorm_after",
"bn0": "batch_norm",
}
lowercase_ = AutoFeatureExtractor.from_pretrained("laion/clap-htsat-unfused", truncation="rand_trunc")
def __lowerCAmelCase ( __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Union[str, Any]=False ):
'''simple docstring'''
__snake_case , __snake_case : str = create_model(
"""HTSAT-tiny""" , """roberta""" , __SCREAMING_SNAKE_CASE , precision="""fp32""" , device="""cuda:0""" if torch.cuda.is_available() else """cpu""" , enable_fusion=__SCREAMING_SNAKE_CASE , fusion_type="""aff_2d""" if enable_fusion else None , )
return model, model_cfg
def __lowerCAmelCase ( __SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
__snake_case : Union[str, Any] = {}
__snake_case : List[Any] = R""".*sequential.(\d+).*"""
__snake_case : Union[str, Any] = R""".*_projection.(\d+).*"""
for key, value in state_dict.items():
# check if any key needs to be modified
for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
if key_to_modify in key:
__snake_case : Optional[Any] = key.replace(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
if re.match(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
# replace sequential layers with list
__snake_case : Optional[Any] = re.match(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).group(1 )
__snake_case : Dict = key.replace(F'''sequential.{sequential_layer}.''' , F'''layers.{int(__SCREAMING_SNAKE_CASE )//3}.linear.''' )
elif re.match(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
__snake_case : str = int(re.match(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).group(1 ) )
# Because in CLAP they use `nn.Sequential`...
__snake_case : List[Any] = 1 if projecton_layer == 0 else 2
__snake_case : Tuple = key.replace(F'''_projection.{projecton_layer}.''' , F'''_projection.linear{transformers_projection_layer}.''' )
if "audio" and "qkv" in key:
# split qkv into query key and value
__snake_case : Optional[int] = value
__snake_case : Any = mixed_qkv.size(0 ) // 3
__snake_case : List[Any] = mixed_qkv[:qkv_dim]
__snake_case : Tuple = mixed_qkv[qkv_dim : qkv_dim * 2]
__snake_case : List[Any] = mixed_qkv[qkv_dim * 2 :]
__snake_case : Any = query_layer
__snake_case : Dict = key_layer
__snake_case : Optional[Any] = value_layer
else:
__snake_case : List[str] = value
return model_state_dict
def __lowerCAmelCase ( __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Optional[Any]=False ):
'''simple docstring'''
__snake_case , __snake_case : List[str] = init_clap(__SCREAMING_SNAKE_CASE , enable_fusion=__SCREAMING_SNAKE_CASE )
clap_model.eval()
__snake_case : Tuple = clap_model.state_dict()
__snake_case : Union[str, Any] = rename_state_dict(__SCREAMING_SNAKE_CASE )
__snake_case : List[Any] = ClapConfig()
__snake_case : Tuple = enable_fusion
__snake_case : Any = ClapModel(__SCREAMING_SNAKE_CASE )
# ignore the spectrogram embedding layer
model.load_state_dict(__SCREAMING_SNAKE_CASE , strict=__SCREAMING_SNAKE_CASE )
model.save_pretrained(__SCREAMING_SNAKE_CASE )
transformers_config.save_pretrained(__SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument("--enable_fusion", action="store_true", help="Whether to enable fusion or not")
lowercase_ = parser.parse_args()
convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
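# A minimal sketch of the qkv split performed in `rename_state_dict` above (names
# illustrative): the fused attention projection stacks query, key and value along
# dim 0, so each part is one third of the tensor.
import torch

def _split_qkv(mixed_qkv: torch.Tensor):
    qkv_dim = mixed_qkv.size(0) // 3
    query = mixed_qkv[:qkv_dim]
    key = mixed_qkv[qkv_dim : qkv_dim * 2]
    value = mixed_qkv[qkv_dim * 2 :]
    return query, key, value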
| 20 | 0 |
import os
import warnings
from typing import List, Optional
from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig
lowercase_ = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE__ :
def __init__( self : List[str] , _lowerCAmelCase : Any , _lowerCAmelCase : str ):
__snake_case : List[Any] = question_encoder
__snake_case : Any = generator
__snake_case : Optional[int] = self.question_encoder
def snake_case__ ( self : str , _lowerCAmelCase : str ):
if os.path.isfile(_lowerCAmelCase ):
raise ValueError(f'''Provided path ({save_directory}) should be a directory, not a file''' )
os.makedirs(_lowerCAmelCase , exist_ok=_lowerCAmelCase )
__snake_case : Union[str, Any] = os.path.join(_lowerCAmelCase , """question_encoder_tokenizer""" )
__snake_case : Tuple = os.path.join(_lowerCAmelCase , """generator_tokenizer""" )
self.question_encoder.save_pretrained(_lowerCAmelCase )
self.generator.save_pretrained(_lowerCAmelCase )
@classmethod
def snake_case__ ( cls : Union[str, Any] , _lowerCAmelCase : Tuple , **_lowerCAmelCase : List[str] ):
# dynamically import AutoTokenizer
from ..auto.tokenization_auto import AutoTokenizer
__snake_case : List[Any] = kwargs.pop("""config""" , _lowerCAmelCase )
if config is None:
__snake_case : Any = RagConfig.from_pretrained(_lowerCAmelCase )
__snake_case : Optional[Any] = AutoTokenizer.from_pretrained(
_lowerCAmelCase , config=config.question_encoder , subfolder="""question_encoder_tokenizer""" )
__snake_case : Optional[int] = AutoTokenizer.from_pretrained(
_lowerCAmelCase , config=config.generator , subfolder="""generator_tokenizer""" )
return cls(question_encoder=_lowerCAmelCase , generator=_lowerCAmelCase )
def __call__( self : List[str] , *_lowerCAmelCase : str , **_lowerCAmelCase : Union[str, Any] ):
return self.current_tokenizer(*_lowerCAmelCase , **_lowerCAmelCase )
def snake_case__ ( self : Union[str, Any] , *_lowerCAmelCase : Optional[int] , **_lowerCAmelCase : Optional[int] ):
return self.generator.batch_decode(*_lowerCAmelCase , **_lowerCAmelCase )
def snake_case__ ( self : int , *_lowerCAmelCase : Tuple , **_lowerCAmelCase : List[str] ):
return self.generator.decode(*_lowerCAmelCase , **_lowerCAmelCase )
def snake_case__ ( self : Optional[int] ):
__snake_case : Any = self.question_encoder
def snake_case__ ( self : Optional[Any] ):
__snake_case : Dict = self.generator
def snake_case__ ( self : Any , _lowerCAmelCase : List[str] , _lowerCAmelCase : Optional[List[str]] = None , _lowerCAmelCase : Optional[int] = None , _lowerCAmelCase : Optional[int] = None , _lowerCAmelCase : str = "longest" , _lowerCAmelCase : str = None , _lowerCAmelCase : bool = True , **_lowerCAmelCase : List[str] , ):
warnings.warn(
"""`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the """
"""regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` """
"""context manager to prepare your targets. See the documentation of your specific tokenizer for more """
"""details""" , _lowerCAmelCase , )
if max_length is None:
__snake_case : str = self.current_tokenizer.model_max_length
__snake_case : Any = self(
_lowerCAmelCase , add_special_tokens=_lowerCAmelCase , return_tensors=_lowerCAmelCase , max_length=_lowerCAmelCase , padding=_lowerCAmelCase , truncation=_lowerCAmelCase , **_lowerCAmelCase , )
if tgt_texts is None:
return model_inputs
# Process tgt_texts
if max_target_length is None:
__snake_case : str = self.current_tokenizer.model_max_length
__snake_case : Optional[Any] = self(
text_target=_lowerCAmelCase , add_special_tokens=_lowerCAmelCase , return_tensors=_lowerCAmelCase , padding=_lowerCAmelCase , max_length=_lowerCAmelCase , truncation=_lowerCAmelCase , **_lowerCAmelCase , )
__snake_case : Optional[int] = labels["""input_ids"""]
return model_inputs
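# The deprecated helper above boils down to two plain tokenizer calls. A sketch of
# the replacement usage its warning points to (tensor type and padding strategy
# are illustrative):
def _prepare_batch_sketch(tokenizer, src_texts, tgt_texts):
    model_inputs = tokenizer(src_texts, return_tensors="pt", padding="longest", truncation=True)
    labels = tokenizer(text_target=tgt_texts, return_tensors="pt", padding="longest", truncation=True)
    model_inputs["labels"] = labels["input_ids"]
    return model_inputs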
| 365 | import io
import os
import unicodedata
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = "▁"
lowercase_ = {"vocab_file": "vocab.txt", "sentencepiece_model_ckpt": "sentencepiece.bpe.model"}
lowercase_ = {
"sentencepiece_model_file": "sentencepiece.bpe.model",
"vocab_file": "vocab.txt",
}
lowercase_ = {
"vocab_file": {
"ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt",
"ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt",
},
"sentencepiece_model_file": {
"ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model",
"ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model",
},
}
lowercase_ = {
"ernie-m-base": 5_14,
"ernie-m-large": 5_14,
}
lowercase_ = {
"ernie-m-base": {"do_lower_case": False},
"ernie-m-large": {"do_lower_case": False},
}
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase ):
A : List[str] = ["input_ids"]
A : Tuple = VOCAB_FILES_NAMES
A : List[Any] = PRETRAINED_INIT_CONFIGURATION
A : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
A : Optional[Any] = RESOURCE_FILES_NAMES
def __init__( self : int , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Dict=None , _lowerCAmelCase : Dict=False , _lowerCAmelCase : List[Any]="utf8" , _lowerCAmelCase : Optional[Any]="[UNK]" , _lowerCAmelCase : Optional[int]="[SEP]" , _lowerCAmelCase : List[str]="[PAD]" , _lowerCAmelCase : Dict="[CLS]" , _lowerCAmelCase : List[Any]="[MASK]" , _lowerCAmelCase : Optional[Dict[str, Any]] = None , **_lowerCAmelCase : Any , ):
# Mask token behave like a normal word, i.e. include the space before it and
# is included in the raw text, there should be a match in a non-normalized sentence.
__snake_case : List[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=_lowerCAmelCase , unk_token=_lowerCAmelCase , sep_token=_lowerCAmelCase , pad_token=_lowerCAmelCase , cls_token=_lowerCAmelCase , mask_token=_lowerCAmelCase , vocab_file=_lowerCAmelCase , encoding=_lowerCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **_lowerCAmelCase , )
__snake_case : List[Any] = do_lower_case
__snake_case : Any = sentencepiece_model_ckpt
__snake_case : int = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(_lowerCAmelCase )
# to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning
if vocab_file is not None:
__snake_case : int = self.load_vocab(filepath=_lowerCAmelCase )
else:
__snake_case : Tuple = {self.sp_model.id_to_piece(_lowerCAmelCase ): id for id in range(self.sp_model.get_piece_size() )}
__snake_case : str = {v: k for k, v in self.vocab.items()}
def snake_case__ ( self : List[Any] , _lowerCAmelCase : List[Any] ):
if text is None:
return None
__snake_case : List[Any] = self.tokenize(_lowerCAmelCase )
__snake_case , __snake_case : Optional[Any] = """""", []
for i, ch in enumerate(_lowerCAmelCase ):
if ch in self.SP_CHAR_MAPPING:
__snake_case : Any = self.SP_CHAR_MAPPING.get(_lowerCAmelCase )
else:
__snake_case : Dict = unicodedata.normalize("""NFKC""" , _lowerCAmelCase )
if self.is_whitespace(_lowerCAmelCase ):
continue
normalized_text += ch
char_mapping.extend([i] * len(_lowerCAmelCase ) )
__snake_case , __snake_case , __snake_case : str = normalized_text, [], 0
if self.do_lower_case:
__snake_case : int = text.lower()
for token in split_tokens:
if token[:1] == "▁":
__snake_case : int = token[1:]
__snake_case : Optional[int] = text[offset:].index(_lowerCAmelCase ) + offset
__snake_case : int = start + len(_lowerCAmelCase )
token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1) )
__snake_case : str = end
return token_mapping
@property
def snake_case__ ( self : Tuple ):
return len(self.vocab )
def snake_case__ ( self : Dict ):
return dict(self.vocab , **self.added_tokens_encoder )
def __getstate__( self : int ):
__snake_case : str = self.__dict__.copy()
__snake_case : Union[str, Any] = None
return state
def __setstate__( self : Optional[Any] , _lowerCAmelCase : List[str] ):
__snake_case : Union[str, Any] = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
__snake_case : Optional[Any] = {}
__snake_case : Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.sentencepiece_model_ckpt )
def snake_case__ ( self : str , _lowerCAmelCase : Optional[int] ):
return "".join((self.SP_CHAR_MAPPING.get(_lowerCAmelCase , _lowerCAmelCase ) for c in text) )
def snake_case__ ( self : Optional[Any] , _lowerCAmelCase : str , _lowerCAmelCase : Tuple=False , _lowerCAmelCase : Optional[Any]=64 , _lowerCAmelCase : Optional[int]=0.1 ):
if self.sp_model_kwargs.get("""enable_sampling""" ) is True:
__snake_case : List[str] = True
if self.sp_model_kwargs.get("""alpha""" ) is not None:
__snake_case : Dict = self.sp_model_kwargs.get("""alpha""" )
if self.sp_model_kwargs.get("""nbest_size""" ) is not None:
__snake_case : List[Any] = self.sp_model_kwargs.get("""nbest_size""" )
if not enable_sampling:
__snake_case : str = self.sp_model.EncodeAsPieces(_lowerCAmelCase )
else:
__snake_case : Tuple = self.sp_model.SampleEncodeAsPieces(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
__snake_case : Union[str, Any] = []
for pi, piece in enumerate(_lowerCAmelCase ):
if piece == SPIECE_UNDERLINE:
if not pieces[pi + 1].startswith(_lowerCAmelCase ) and pi != 0:
new_pieces.append(_lowerCAmelCase )
continue
else:
continue
__snake_case : Optional[int] = 0
for i, chunk in enumerate(_lowerCAmelCase ):
if chunk == SPIECE_UNDERLINE:
continue
if self.is_ch_char(_lowerCAmelCase ) or self.is_punct(_lowerCAmelCase ):
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
new_pieces.append(_lowerCAmelCase )
__snake_case : Tuple = i + 1
elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit():
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
__snake_case : List[str] = i
elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit():
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
__snake_case : Tuple = i
if len(_lowerCAmelCase ) > lst_i:
new_pieces.append(piece[lst_i:] )
return new_pieces
def snake_case__ ( self : Union[str, Any] , _lowerCAmelCase : int ):
__snake_case : int = """""".join(_lowerCAmelCase ).replace(_lowerCAmelCase , """ """ ).strip()
return out_string
def snake_case__ ( self : Dict , _lowerCAmelCase : Tuple ):
__snake_case : int = self.convert_ids_to_tokens(_lowerCAmelCase )
__snake_case : Any = """""".join(_lowerCAmelCase ).replace(_lowerCAmelCase , """ """ ).strip()
return out_string
def snake_case__ ( self : Dict , _lowerCAmelCase : Tuple ):
return self.vocab.get(_lowerCAmelCase , self.vocab.get(self.unk_token ) )
def snake_case__ ( self : Union[str, Any] , _lowerCAmelCase : Dict ):
return self.reverse_vocab.get(_lowerCAmelCase , self.unk_token )
def snake_case__ ( self : Any , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Union[str, Any]=None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
__snake_case : Union[str, Any] = [self.cls_token_id]
__snake_case : Optional[Any] = [self.sep_token_id]
return _cls + token_ids_a + _sep + _sep + token_ids_a + _sep
def snake_case__ ( self : List[str] , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Optional[Any]=None ):
if offset_mapping_a is None:
return [(0, 0)] + offset_mapping_a + [(0, 0)]
return [(0, 0)] + offset_mapping_a + [(0, 0), (0, 0)] + offset_mapping_a + [(0, 0)]
def snake_case__ ( self : Optional[Any] , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Optional[Any]=None , _lowerCAmelCase : Optional[Any]=False ):
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
"""You should not supply a second sequence if the provided sequence of """
"""ids is already formatted with special tokens for the model.""" )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(_lowerCAmelCase )) + [1, 1] + ([0] * len(_lowerCAmelCase )) + [1]
return [1] + ([0] * len(_lowerCAmelCase )) + [1]
def snake_case__ ( self : Optional[int] , _lowerCAmelCase : List[int] , _lowerCAmelCase : Optional[List[int]] = None ):
# called when `add_special_tokens` is True, so align with `build_inputs_with_special_tokens` method
if token_ids_a is None:
# [CLS] X [SEP]
return (len(_lowerCAmelCase ) + 2) * [0]
# [CLS] A [SEP] [SEP] B [SEP]
return [0] * (len(_lowerCAmelCase ) + 1) + [1] * (len(_lowerCAmelCase ) + 3)
def snake_case__ ( self : Tuple , _lowerCAmelCase : List[str] ):
if "\u4e00" <= char <= "\u9fff":
return True
return False
def snake_case__ ( self : List[str] , _lowerCAmelCase : Any ):
if ("a" <= char <= "z") or ("A" <= char <= "Z"):
return True
return False
def snake_case__ ( self : int , _lowerCAmelCase : List[Any] ):
if char in ",;:.?!~,;:。?!《》【】":
return True
return False
def snake_case__ ( self : str , _lowerCAmelCase : Optional[Any] ):
if char == " " or char == "\t" or char == "\n" or char == "\r":
return True
if len(_lowerCAmelCase ) == 1:
__snake_case : Dict = unicodedata.category(_lowerCAmelCase )
if cat == "Zs":
return True
return False
def snake_case__ ( self : str , _lowerCAmelCase : List[Any] ):
__snake_case : Dict = {}
with io.open(_lowerCAmelCase , """r""" , encoding="""utf-8""" ) as f:
for index, line in enumerate(_lowerCAmelCase ):
__snake_case : Tuple = line.rstrip("""\n""" )
__snake_case : List[str] = int(_lowerCAmelCase )
return token_to_idx
def snake_case__ ( self : Union[str, Any] , _lowerCAmelCase : str , _lowerCAmelCase : Optional[str] = None ):
__snake_case : Optional[int] = 0
if os.path.isdir(_lowerCAmelCase ):
__snake_case : int = os.path.join(
_lowerCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
else:
__snake_case : Optional[Any] = (filename_prefix + """-""" if filename_prefix else """""") + save_directory
with open(_lowerCAmelCase , """w""" , encoding="""utf-8""" ) as writer:
for token, token_index in sorted(self.vocab.items() , key=lambda _lowerCAmelCase : kv[1] ):
if index != token_index:
logger.warning(
f'''Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.'''
""" Please check that the vocabulary is not corrupted!""" )
__snake_case : Union[str, Any] = token_index
writer.write(token + """\n""" )
index += 1
__snake_case : List[Any] = os.path.join(_lowerCAmelCase , """sentencepiece.bpe.model""" )
with open(_lowerCAmelCase , """wb""" ) as fi:
__snake_case : List[Any] = self.sp_model.serialized_model_proto()
fi.write(_lowerCAmelCase )
return (vocab_file,)
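# A compact sketch of the char-mapping idea used by the offset-mapping method
# above (names reconstructed): each character of the NFKC-normalized text
# remembers the raw-text index it came from, so token spans located in the
# normalized string can be mapped back to offsets in the original input.
import unicodedata

def _build_char_mapping(text: str):
    normalized, char_mapping = "", []
    for i, ch in enumerate(text):
        norm = unicodedata.normalize("NFKC", ch)
        if norm.isspace():
            continue
        normalized += norm
        char_mapping.extend([i] * len(norm))
    return normalized, char_mapping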
| 20 | 0 |
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel
from diffusers import DDIMScheduler, LDMPipeline, UNetaDModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
@property
def snake_case__ ( self : Optional[int] ):
torch.manual_seed(0 )
__snake_case : List[str] = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("""DownBlock2D""", """AttnDownBlock2D""") , up_block_types=("""AttnUpBlock2D""", """UpBlock2D""") , )
return model
@property
def snake_case__ ( self : int ):
torch.manual_seed(0 )
__snake_case : Any = VQModel(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=3 , )
return model
@property
def snake_case__ ( self : Dict ):
torch.manual_seed(0 )
__snake_case : Dict = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
return CLIPTextModel(_lowerCAmelCase )
def snake_case__ ( self : Optional[int] ):
__snake_case : List[Any] = self.dummy_uncond_unet
__snake_case : Dict = DDIMScheduler()
__snake_case : Any = self.dummy_vq_model
__snake_case : Any = LDMPipeline(unet=_lowerCAmelCase , vqvae=_lowerCAmelCase , scheduler=_lowerCAmelCase )
ldm.to(_lowerCAmelCase )
ldm.set_progress_bar_config(disable=_lowerCAmelCase )
__snake_case : int = torch.manual_seed(0 )
__snake_case : Tuple = ldm(generator=_lowerCAmelCase , num_inference_steps=2 , output_type="""numpy""" ).images
__snake_case : Optional[int] = torch.manual_seed(0 )
__snake_case : Optional[int] = ldm(generator=_lowerCAmelCase , num_inference_steps=2 , output_type="""numpy""" , return_dict=_lowerCAmelCase )[0]
__snake_case : Optional[Any] = image[0, -3:, -3:, -1]
__snake_case : Union[str, Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
__snake_case : List[str] = np.array([0.8512, 0.818, 0.6411, 0.6808, 0.4465, 0.5618, 0.46, 0.6231, 0.5172] )
__snake_case : Any = 1e-2 if torch_device != """mps""" else 3e-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < tolerance
@slow
@require_torch
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def snake_case__ ( self : Tuple ):
__snake_case : List[Any] = LDMPipeline.from_pretrained("""CompVis/ldm-celebahq-256""" )
ldm.to(_lowerCAmelCase )
ldm.set_progress_bar_config(disable=_lowerCAmelCase )
__snake_case : List[Any] = torch.manual_seed(0 )
__snake_case : Any = ldm(generator=_lowerCAmelCase , num_inference_steps=5 , output_type="""numpy""" ).images
__snake_case : List[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 2_56, 2_56, 3)
__snake_case : Optional[Any] = np.array([0.4399, 0.44975, 0.46825, 0.474, 0.4359, 0.4581, 0.45095, 0.4341, 0.4447] )
__snake_case : Tuple = 1e-2 if torch_device != """mps""" else 3e-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
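# A rough sketch of what `LDMPipeline.__call__` does with the components above
# (assumed from the diffusers API; argument names illustrative): denoise a random
# latent with the UNet under the scheduler, then decode it with the VQ-VAE.
import torch

def _ldm_sample_sketch(unet, vqvae, scheduler, generator, num_inference_steps=50):
    latents = torch.randn(
        (1, unet.config.in_channels, unet.config.sample_size, unet.config.sample_size),
        generator=generator,
    )
    scheduler.set_timesteps(num_inference_steps)
    for t in scheduler.timesteps:
        noise_pred = unet(latents, t).sample
        latents = scheduler.step(noise_pred, t, latents).prev_sample
    image = vqvae.decode(latents).sample
    return (image / 2 + 0.5).clamp(0, 1)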
| 366 | from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"xlm-mlm-en-2048": "https://huggingface.co/xlm-mlm-en-2048/resolve/main/config.json",
"xlm-mlm-ende-1024": "https://huggingface.co/xlm-mlm-ende-1024/resolve/main/config.json",
"xlm-mlm-enfr-1024": "https://huggingface.co/xlm-mlm-enfr-1024/resolve/main/config.json",
"xlm-mlm-enro-1024": "https://huggingface.co/xlm-mlm-enro-1024/resolve/main/config.json",
"xlm-mlm-tlm-xnli15-1024": "https://huggingface.co/xlm-mlm-tlm-xnli15-1024/resolve/main/config.json",
"xlm-mlm-xnli15-1024": "https://huggingface.co/xlm-mlm-xnli15-1024/resolve/main/config.json",
"xlm-clm-enfr-1024": "https://huggingface.co/xlm-clm-enfr-1024/resolve/main/config.json",
"xlm-clm-ende-1024": "https://huggingface.co/xlm-clm-ende-1024/resolve/main/config.json",
"xlm-mlm-17-1280": "https://huggingface.co/xlm-mlm-17-1280/resolve/main/config.json",
"xlm-mlm-100-1280": "https://huggingface.co/xlm-mlm-100-1280/resolve/main/config.json",
}
class XLMConfig(PretrainedConfig):
    model_type = "xlm"
    attribute_map = {
        "hidden_size": "emb_dim",
        "num_attention_heads": "n_heads",
        "num_hidden_layers": "n_layers",
        "n_words": "vocab_size",  # For backward compatibility
    }
    def __init__(self, vocab_size=30145, emb_dim=2048, n_layers=12, n_heads=16, dropout=0.1, attention_dropout=0.1, gelu_activation=True, sinusoidal_embeddings=False, causal=False, asm=False, n_langs=1, use_lang_emb=True, max_position_embeddings=512, embed_init_std=2048**-0.5, layer_norm_eps=1e-12, init_std=0.02, bos_index=0, eos_index=1, pad_index=2, unk_index=3, mask_index=5, is_encoder=True, summary_type="first", summary_use_proj=True, summary_activation=None, summary_proj_to_labels=True, summary_first_dropout=0.1, start_n_top=5, end_n_top=5, mask_token_id=0, lang_id=0, pad_token_id=2, bos_token_id=0, **kwargs):
        self.vocab_size = vocab_size
        self.emb_dim = emb_dim
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.use_lang_emb = use_lang_emb
        self.layer_norm_eps = layer_norm_eps
        self.bos_index = bos_index
        self.eos_index = eos_index
        self.pad_index = pad_index
        self.unk_index = unk_index
        self.mask_index = mask_index
        self.is_encoder = is_encoder
        self.max_position_embeddings = max_position_embeddings
        self.embed_init_std = embed_init_std
        self.init_std = init_std
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_proj_to_labels = summary_proj_to_labels
        self.summary_first_dropout = summary_first_dropout
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top
        self.mask_token_id = mask_token_id
        self.lang_id = lang_id
        if "n_words" in kwargs:
            self.n_words = kwargs["n_words"]
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, **kwargs)
class XLMOnnxConfig(OnnxConfig):
@property
    def inputs(self):
if self.task == "multiple-choice":
__snake_case : Optional[int] = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
__snake_case : Tuple = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
("""token_type_ids""", dynamic_axis),
] )
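# Illustrative sketch (added for clarity; not part of the original file): the mapping
# returned above tells the ONNX exporter which input dimensions are dynamic. For the
# default (non multiple-choice) task it is equivalent to building:
def _demo_xlm_onnx_inputs():
    dynamic_axis = {0: "batch", 1: "sequence"}
    return OrderedDict(
        [("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ("token_type_ids", dynamic_axis)]
    )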
| 20 | 0 |
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
lowercase_ = "python tqdm regex requests packaging filelock numpy tokenizers".split()
if sys.version_info < (3, 7):
pkgs_to_check_at_runtime.append("dataclasses")
if sys.version_info < (3, 8):
pkgs_to_check_at_runtime.append("importlib_metadata")
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(F'''can\'t find {pkg} in {deps.keys()}, check dependency_versions_table.py''')
def dep_version_check(pkg, hint=None):
    '''Check that the installed version of `pkg` satisfies the pinned requirement.'''
    require_version(deps[pkg], hint)
| 367 | import math
from typing import Optional
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"facebook/encodec_24khz": "https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json",
"facebook/encodec_48khz": "https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json",
}
class EncodecConfig(PretrainedConfig):
    model_type = "encodec"
    def __init__(self, target_bandwidths=[1.5, 3.0, 6.0, 12.0, 24.0], sampling_rate=24_000, audio_channels=1, normalize=False, chunk_length_s=None, overlap=None, hidden_size=128, num_filters=32, num_residual_layers=1, upsampling_ratios=[8, 5, 4, 2], norm_type="weight_norm", kernel_size=7, last_kernel_size=7, residual_kernel_size=3, dilation_growth_rate=2, use_causal_conv=True, pad_mode="reflect", compress=2, num_lstm_layers=2, trim_right_ratio=1.0, codebook_size=1024, codebook_dim=None, use_conv_shortcut=True, **kwargs):
        self.target_bandwidths = target_bandwidths
        self.sampling_rate = sampling_rate
        self.audio_channels = audio_channels
        self.normalize = normalize
        self.chunk_length_s = chunk_length_s
        self.overlap = overlap
        self.hidden_size = hidden_size
        self.num_filters = num_filters
        self.num_residual_layers = num_residual_layers
        self.upsampling_ratios = upsampling_ratios
        self.norm_type = norm_type
        self.kernel_size = kernel_size
        self.last_kernel_size = last_kernel_size
        self.residual_kernel_size = residual_kernel_size
        self.dilation_growth_rate = dilation_growth_rate
        self.use_causal_conv = use_causal_conv
        self.pad_mode = pad_mode
        self.compress = compress
        self.num_lstm_layers = num_lstm_layers
        self.trim_right_ratio = trim_right_ratio
        self.codebook_size = codebook_size
        self.codebook_dim = codebook_dim if codebook_dim is not None else hidden_size
        self.use_conv_shortcut = use_conv_shortcut
        if self.norm_type not in ["weight_norm", "time_group_norm"]:
            raise ValueError(
                f"""self.norm_type must be one of `"weight_norm"`, `"time_group_norm"`), got {self.norm_type}""" )
        super().__init__(**kwargs)
@property
    def chunk_length(self):
if self.chunk_length_s is None:
return None
else:
return int(self.chunk_length_s * self.sampling_rate )
@property
    def chunk_stride(self):
if self.chunk_length_s is None or self.overlap is None:
return None
else:
return max(1 , int((1.0 - self.overlap) * self.chunk_length ) )
@property
    def frame_rate(self):
        hop_length = np.prod(self.upsampling_ratios)
return math.ceil(self.sampling_rate / hop_length )
@property
    def num_quantizers(self):
return int(10_00 * self.target_bandwidths[-1] // (self.frame_rate * 10) )
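# Worked example (added for clarity; not in the original file): with the default
# upsampling_ratios [8, 5, 4, 2] the hop length is 8 * 5 * 4 * 2 = 320 samples, so at
# sampling_rate 24000 the frame rate is ceil(24000 / 320) = 75 frames per second, and
# the 24.0 kbps target bandwidth needs int(1000 * 24.0 // (75 * 10)) = 32 quantizers.
def _demo_encodec_frame_rate():
    hop_length = np.prod([8, 5, 4, 2])  # 320
    assert math.ceil(24_000 / hop_length) == 75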
| 20 | 0 |
def depth_first_search(grid: list[list[int]], row: int, col: int, visit: set) -> int:
    '''Count the paths from (0, 0) to the bottom-right cell; 0 is free, 1 is blocked.'''
    row_length, col_length = len(grid), len(grid[0])
    if (
        min(row, col) < 0
        or row == row_length
        or col == col_length
        or (row, col) in visit
        or grid[row][col] == 1
    ):
        return 0
    if row == row_length - 1 and col == col_length - 1:
        return 1

    visit.add((row, col))

    count = 0
    count += depth_first_search(grid, row + 1, col, visit)
    count += depth_first_search(grid, row - 1, col, visit)
    count += depth_first_search(grid, row, col + 1, visit)
    count += depth_first_search(grid, row, col - 1, visit)

    visit.remove((row, col))
return count
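# Usage sketch (added for illustration; not in the original file): with the centre
# cell blocked, exactly two paths hug the border of a 3x3 grid.
def _demo_depth_first_search():
    maze = [[0, 0, 0], [0, 1, 0], [0, 0, 0]]
    assert depth_first_search(maze, 0, 0, set()) == 2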
if __name__ == "__main__":
import doctest
doctest.testmod()
| 368 | from __future__ import annotations
def merge(input_list: list, low: int, mid: int, high: int) -> list:
    '''Merge the sorted runs input_list[low:mid] and input_list[mid:high + 1] in place.'''
    result = []
    left, right = input_list[low:mid], input_list[mid : high + 1]
    while left and right:
        result.append((left if left[0] <= right[0] else right).pop(0))
    input_list[low : high + 1] = result + left + right
return input_list
def iter_merge_sort(input_list: list) -> list:
    '''Bottom-up (iterative) merge sort: merge runs whose length p doubles each pass.'''
    if len(input_list) <= 1:
        return input_list
    input_list = list(input_list)
    # iteration for two-way merging
    p = 2
    while p <= len(input_list):
        # getting low, high and middle value for merge-sort of single list
        for i in range(0, len(input_list), p):
            low = i
            high = i + p - 1
            mid = (low + high + 1) // 2
            input_list = merge(input_list, low, mid, high)
        # final merge of last two parts
        if p * 2 >= len(input_list):
            mid = i
            input_list = merge(input_list, 0, mid, len(input_list) - 1)
            break
        p *= 2
return input_list
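# Worked example (added; not in the original file): the run length p doubles each
# pass, e.g. [4, 3, 1, 2] -> p=2 -> [3, 4, 1, 2] -> final merge -> [1, 2, 3, 4].
def _demo_iter_merge_sort():
    assert iter_merge_sort([4, 3, 1, 2]) == [1, 2, 3, 4]
    assert iter_merge_sort([5]) == [5]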
if __name__ == "__main__":
lowercase_ = input("Enter numbers separated by a comma:\n").strip()
if user_input == "":
lowercase_ = []
else:
lowercase_ = [int(item.strip()) for item in user_input.split(",")]
print(iter_merge_sort(unsorted))
| 20 | 0 |
from typing import Dict, Optional
import numpy as np
import datasets
lowercase_ = "\nIoU is the area of overlap between the predicted segmentation and the ground truth divided by the area of union\nbetween the predicted segmentation and the ground truth. For binary (two classes) or multi-class segmentation,\nthe mean IoU of the image is calculated by taking the IoU of each class and averaging them.\n"
lowercase_ = "\nArgs:\n predictions (`List[ndarray]`):\n List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n references (`List[ndarray]`):\n List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n num_labels (`int`):\n Number of classes (categories).\n ignore_index (`int`):\n Index that will be ignored during evaluation.\n nan_to_num (`int`, *optional*):\n If specified, NaN values will be replaced by the number defined by the user.\n label_map (`dict`, *optional*):\n If specified, dictionary mapping old label indices to new label indices.\n reduce_labels (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background,\n and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255.\n\nReturns:\n `Dict[str, float | ndarray]` comprising various elements:\n - *mean_iou* (`float`):\n Mean Intersection-over-Union (IoU averaged over all categories).\n - *mean_accuracy* (`float`):\n Mean accuracy (averaged over all categories).\n - *overall_accuracy* (`float`):\n Overall accuracy on all images.\n - *per_category_accuracy* (`ndarray` of shape `(num_labels,)`):\n Per category accuracy.\n - *per_category_iou* (`ndarray` of shape `(num_labels,)`):\n Per category IoU.\n\nExamples:\n\n >>> import numpy as np\n\n >>> mean_iou = datasets.load_metric(\"mean_iou\")\n\n >>> # suppose one has 3 different segmentation maps predicted\n >>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]])\n >>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]])\n\n >>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]])\n >>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]])\n\n >>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]])\n >>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]])\n\n >>> predicted = [predicted_1, predicted_2, predicted_3]\n >>> ground_truth = [actual_1, actual_2, actual_3]\n\n >>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False)\n >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n {'mean_iou': 0.47750000000000004, 'mean_accuracy': 0.5916666666666666, 'overall_accuracy': 0.5263157894736842, 'per_category_iou': array([0. , 0. , 0.375, 0.4 , 0.5 , 0. , 0.5 , 1. , 1. , 1. ]), 'per_category_accuracy': array([0. , 0. , 0.75 , 0.66666667, 1. , 0. , 0.5 , 1. , 1. , 1. ])}\n"
lowercase_ = "\\n@software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020,\nauthor = {{MMSegmentation Contributors}},\nlicense = {Apache-2.0},\nmonth = {7},\ntitle = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}},\nurl = {https://github.com/open-mmlab/mmsegmentation},\nyear = {2020}\n}"
def intersect_and_union(pred_label, label, num_labels, ignore_index: bool, label_map: Optional[Dict[int, int]] = None, reduce_labels: bool = False):
    '''Compute per-class intersection/union areas for one prediction, ground-truth pair.'''
    if label_map is not None:
        for old_id, new_id in label_map.items():
            label[label == old_id] = new_id
    # turn into Numpy arrays
    pred_label = np.array(pred_label)
    label = np.array(label)
    if reduce_labels:
        label[label == 0] = 255
        label = label - 1
        label[label == 254] = 255
    mask = label != ignore_index
    mask = np.not_equal(label, ignore_index)
    pred_label = pred_label[mask]
    label = np.array(label)[mask]
    intersect = pred_label[pred_label == label]
    area_intersect = np.histogram(intersect, bins=num_labels, range=(0, num_labels - 1))[0]
    area_pred_label = np.histogram(pred_label, bins=num_labels, range=(0, num_labels - 1))[0]
    area_label = np.histogram(label, bins=num_labels, range=(0, num_labels - 1))[0]
    area_union = area_pred_label + area_label - area_intersect
    return area_intersect, area_union, area_pred_label, area_label
def total_intersect_and_union(results, gt_seg_maps, num_labels, ignore_index: bool, label_map: Optional[Dict[int, int]] = None, reduce_labels: bool = False):
    '''Accumulate per-class intersection/union areas over a whole dataset.'''
    total_area_intersect = np.zeros((num_labels,), dtype=np.float64)
    total_area_union = np.zeros((num_labels,), dtype=np.float64)
    total_area_pred_label = np.zeros((num_labels,), dtype=np.float64)
    total_area_label = np.zeros((num_labels,), dtype=np.float64)
    for result, gt_seg_map in zip(results, gt_seg_maps):
        area_intersect, area_union, area_pred_label, area_label = intersect_and_union(
            result, gt_seg_map, num_labels, ignore_index, label_map, reduce_labels
        )
        total_area_intersect += area_intersect
        total_area_union += area_union
        total_area_pred_label += area_pred_label
        total_area_label += area_label
    return total_area_intersect, total_area_union, total_area_pred_label, total_area_label
def mean_iou(results, gt_seg_maps, num_labels, ignore_index: bool, nan_to_num: Optional[int] = None, label_map: Optional[Dict[int, int]] = None, reduce_labels: bool = False):
    '''Compute mean IoU, mean accuracy and overall accuracy from the accumulated areas.'''
    total_area_intersect, total_area_union, total_area_pred_label, total_area_label = total_intersect_and_union(
        results, gt_seg_maps, num_labels, ignore_index, label_map, reduce_labels
    )
    # compute metrics
    metrics = {}
    all_acc = total_area_intersect.sum() / total_area_label.sum()
    iou = total_area_intersect / total_area_union
    acc = total_area_intersect / total_area_label
    metrics["mean_iou"] = np.nanmean(iou)
    metrics["mean_accuracy"] = np.nanmean(acc)
    metrics["overall_accuracy"] = all_acc
    metrics["per_category_iou"] = iou
    metrics["per_category_accuracy"] = acc
    if nan_to_num is not None:
        metrics = {metric: np.nan_to_num(metric_value, nan=nan_to_num) for metric, metric_value in metrics.items()}
    return metrics
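# Tiny worked example (added for clarity; not in the original file): one 2x2 map with
# two classes. Class 0: intersection 1, union 2 -> IoU 0.5; class 1: intersection 2,
# union 3 -> IoU 2/3; so the mean IoU is (0.5 + 2/3) / 2.
def _demo_mean_iou():
    pred = [np.array([[0, 1], [1, 1]])]
    gt = [np.array([[0, 0], [1, 1]])]
    metrics = mean_iou(pred, gt, num_labels=2, ignore_index=255)
    assert abs(metrics["mean_iou"] - (0.5 + 2 / 3) / 2) < 1e-6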
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SCREAMING_SNAKE_CASE__ ( datasets.Metric ):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
# 1st Seq - height dim, 2nd - width dim
{
"""predictions""": datasets.Sequence(datasets.Sequence(datasets.Value("""uint16""" ) ) ),
"""references""": datasets.Sequence(datasets.Sequence(datasets.Value("""uint16""" ) ) ),
} ) , reference_urls=[
"""https://github.com/open-mmlab/mmsegmentation/blob/71c201b1813267d78764f306a297ca717827c4bf/mmseg/core/evaluation/metrics.py"""
] , )
    def _compute(self, predictions, references, num_labels, ignore_index: bool, nan_to_num: Optional[int] = None, label_map: Optional[Dict[int, int]] = None, reduce_labels: bool = False):
        iou_result = mean_iou(
            results=predictions, gt_seg_maps=references, num_labels=num_labels, ignore_index=ignore_index, nan_to_num=nan_to_num, label_map=label_map, reduce_labels=reduce_labels)
        return iou_result
| 369 | import random
import sys
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.colors import ListedColormap
lowercase_ = "Usage of script: script_name <size_of_canvas:int>"
lowercase_ = [0] * 1_00 + [1] * 10
random.shuffle(choice)
def create_canvas(size: int) -> list[list[bool]]:
    '''Create an all-dead square canvas of the given size.'''
    canvas = [[False for i in range(size)] for j in range(size)]
return canvas
def seed(canvas: list[list[bool]]) -> None:
    '''Randomly set each cell of the canvas to alive or dead.'''
    for i, row in enumerate(canvas):
        for j, _ in enumerate(row):
            canvas[i][j] = bool(random.getrandbits(1))
def run(canvas: list[list[bool]]) -> list[list[bool]]:
    '''Advance the canvas by one generation of Conway's Game of Life.'''
    current_canvas = np.array(canvas)
    next_gen_canvas = np.array(create_canvas(current_canvas.shape[0]))
    for r, row in enumerate(current_canvas):
        for c, pt in enumerate(row):
            next_gen_canvas[r][c] = __judge_point(
                pt, current_canvas[r - 1 : r + 2, c - 1 : c + 2]
            )
    current_canvas = next_gen_canvas
    del next_gen_canvas  # cleaning memory as we move on.
    return_canvas: list[list[bool]] = current_canvas.tolist()
return return_canvas
def __judge_point(pt: bool, neighbours: list[list[bool]]) -> bool:
    '''Apply the B3/S23 Game of Life rules to one cell and its 3x3 neighbourhood.'''
    dead = 0
    alive = 0
# finding dead or alive neighbours count.
for i in neighbours:
for status in i:
if status:
alive += 1
else:
dead += 1
# handling duplicate entry for focus pt.
if pt:
alive -= 1
else:
dead -= 1
# running the rules of game here.
    state = pt
    if pt:
        if alive < 2:
            state = False
        elif alive == 2 or alive == 3:
            state = True
        elif alive > 3:
            state = False
    else:
        if alive == 3:
            state = True
return state
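# Rule check (added for illustration; not in the original file): a live cell with two
# live neighbours survives, and a dead cell with exactly three live neighbours is born.
def _demo_judge_point():
    survives = __judge_point(True, [[True, True, False], [False, True, False], [False, False, False]])
    born = __judge_point(False, [[True, True, True], [False, False, False], [False, False, False]])
    assert survives and born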
if __name__ == "__main__":
if len(sys.argv) != 2:
raise Exception(usage_doc)
    canvas_size = int(sys.argv[1])
# main working structure of this module.
    c = create_canvas(canvas_size)
seed(c)
    fig, ax = plt.subplots()
fig.show()
lowercase_ = ListedColormap(["w", "k"])
try:
while True:
            c = run(c)
ax.matshow(c, cmap=cmap)
fig.canvas.draw()
ax.cla()
except KeyboardInterrupt:
# do nothing.
pass
| 20 | 0 |
from __future__ import annotations
def rec_insertion_sort(collection: list, n: int):
    '''Recursively insertion-sort the first n elements of collection in place.'''
    # Checks if the entire collection has been sorted
    if len(collection) <= 1 or n <= 1:
        return
    insert_next(collection, n - 1)
    rec_insertion_sort(collection, n - 1)


def insert_next(collection: list, index: int):
    '''Bubble collection[index] leftwards until the prefix is ordered.'''
    # Checks order between adjacent elements
    if index >= len(collection) or collection[index - 1] <= collection[index]:
        return
    # Swaps adjacent elements since they are not in ascending order
    collection[index - 1], collection[index] = (
        collection[index],
        collection[index - 1],
    )
    insert_next(collection, index + 1)
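# Usage sketch (added; not in the original file): the sort works in place.
def _demo_rec_insertion_sort():
    data = [3, 1, 2]
    rec_insertion_sort(data, len(data))
    assert data == [1, 2, 3]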
if __name__ == "__main__":
lowercase_ = input("Enter integers separated by spaces: ")
lowercase_ = [int(num) for num in numbers.split()]
rec_insertion_sort(number_list, len(number_list))
print(number_list)
| 370 | import inspect
import warnings
from typing import Any, Dict, Optional, Union
from packaging import version
def deprecate(*args, take_from: Optional[Union[Dict, Any]] = None, standard_warn=True, stacklevel=2):
    '''Warn about deprecated (attribute, version, message) tuples and pop deprecated kwargs.'''
    from .. import __version__

    deprecated_kwargs = take_from
    values = ()
    if not isinstance(args[0], tuple):
        args = (args,)
    for attribute, version_name, message in args:
        if version.parse(version.parse(__version__).base_version) >= version.parse(version_name):
            raise ValueError(
                f"The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers'"
                f" version {__version__} is >= {version_name}"
            )
        warning = None
        if isinstance(deprecated_kwargs, dict) and attribute in deprecated_kwargs:
            values += (deprecated_kwargs.pop(attribute),)
            warning = f"The `{attribute}` argument is deprecated and will be removed in version {version_name}."
        elif hasattr(deprecated_kwargs, attribute):
            values += (getattr(deprecated_kwargs, attribute),)
            warning = f"The `{attribute}` attribute is deprecated and will be removed in version {version_name}."
        elif deprecated_kwargs is None:
            warning = f"`{attribute}` is deprecated and will be removed in version {version_name}."
        if warning is not None:
            warning = warning + " " if standard_warn else ""
            warnings.warn(warning + message, FutureWarning, stacklevel=stacklevel)
    if isinstance(deprecated_kwargs, dict) and len(deprecated_kwargs) > 0:
        call_frame = inspect.getouterframes(inspect.currentframe())[1]
        filename = call_frame.filename
        line_number = call_frame.lineno
        function = call_frame.function
        key, value = next(iter(deprecated_kwargs.items()))
        raise TypeError(f"{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`")
    if len(values) == 0:
        return
    elif len(values) == 1:
return values[0]
return values
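# Usage sketch (added; the names here are illustrative only): deprecating a keyword
# argument of some function by popping it out of **kwargs with a FutureWarning.
def _demo_deprecate(**kwargs):
    old_value = deprecate("old_kwarg", "99.0.0", "Use `new_kwarg` instead.", take_from=kwargs)
    return old_value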
| 20 | 0 |
class SCREAMING_SNAKE_CASE__ :
    def __init__(self, size: int):
        self.size = size
        self.arr = [0] * size
        self.tree = [0] * size
@staticmethod
    def get_next(index: int) -> int:
        return index | (index + 1)

    @staticmethod
    def get_prev(index: int) -> int:
        return (index & (index + 1)) - 1

    def update(self, index: int, value: int) -> None:
        self.arr[index] = value
        while index < self.size:
            current_left_border = self.get_prev(index) + 1
            if current_left_border == index:
                self.tree[index] = value
            else:
                self.tree[index] = max(value, current_left_border, index)
            index = self.get_next(index)
    def query(self, left: int, right: int) -> int:
        '''Maximum over arr[left:right] (right exclusive).'''
        right -= 1  # Because of right is exclusive
        result = 0
        while left <= right:
            current_left = self.get_prev(right)
            if left <= current_left:
                result = max(result, self.tree[right])
                right = current_left
            else:
                result = max(result, self.arr[right])
                right -= 1
        return result
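# Usage sketch (added; not in the original file): point updates with max queries
# over the half-open range [left, right).
def _demo_max_fenwick_tree():
    tree = SCREAMING_SNAKE_CASE__(5)  # the max-Fenwick-tree class defined above
    tree.update(0, 4)
    tree.update(3, 7)
    assert tree.query(0, 5) == 7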
if __name__ == "__main__":
import doctest
doctest.testmod()
| 371 | import argparse
import os
from . import (
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
AlbertConfig,
BartConfig,
BertConfig,
CamembertConfig,
CTRLConfig,
DistilBertConfig,
DPRConfig,
ElectraConfig,
FlaubertConfig,
GPTaConfig,
LayoutLMConfig,
LxmertConfig,
OpenAIGPTConfig,
RobertaConfig,
TaConfig,
TFAlbertForPreTraining,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFCamembertForMaskedLM,
TFCTRLLMHeadModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
TFElectraForPreTraining,
TFFlaubertWithLMHeadModel,
TFGPTaLMHeadModel,
TFLayoutLMForMaskedLM,
TFLxmertForPreTraining,
TFLxmertVisualFeatureEncoder,
TFOpenAIGPTLMHeadModel,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForSequenceClassification,
TFTaForConditionalGeneration,
TFTransfoXLLMHeadModel,
TFWavaVecaModel,
TFXLMRobertaForMaskedLM,
TFXLMWithLMHeadModel,
TFXLNetLMHeadModel,
TransfoXLConfig,
WavaVecaConfig,
WavaVecaModel,
XLMConfig,
XLMRobertaConfig,
XLNetConfig,
is_torch_available,
load_pytorch_checkpoint_in_tfa_model,
)
from .utils import CONFIG_NAME, WEIGHTS_NAME, cached_file, logging
if is_torch_available():
import numpy as np
import torch
from . import (
AlbertForPreTraining,
BartForConditionalGeneration,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
CamembertForMaskedLM,
CTRLLMHeadModel,
DistilBertForMaskedLM,
DistilBertForQuestionAnswering,
DPRContextEncoder,
DPRQuestionEncoder,
DPRReader,
ElectraForPreTraining,
FlaubertWithLMHeadModel,
GPTaLMHeadModel,
LayoutLMForMaskedLM,
LxmertForPreTraining,
LxmertVisualFeatureEncoder,
OpenAIGPTLMHeadModel,
RobertaForMaskedLM,
RobertaForSequenceClassification,
TaForConditionalGeneration,
TransfoXLLMHeadModel,
XLMRobertaForMaskedLM,
XLMWithLMHeadModel,
XLNetLMHeadModel,
)
logging.set_verbosity_info()
lowercase_ = {
"bart": (
BartConfig,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
BartForConditionalGeneration,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
),
"bert": (
BertConfig,
TFBertForPreTraining,
BertForPreTraining,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"bert-large-uncased-whole-word-masking-finetuned-squad": (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"bert-large-cased-whole-word-masking-finetuned-squad": (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"bert-base-cased-finetuned-mrpc": (
BertConfig,
TFBertForSequenceClassification,
BertForSequenceClassification,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"dpr": (
DPRConfig,
TFDPRQuestionEncoder,
TFDPRContextEncoder,
TFDPRReader,
DPRQuestionEncoder,
DPRContextEncoder,
DPRReader,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
),
"gpt2": (
GPTaConfig,
TFGPTaLMHeadModel,
GPTaLMHeadModel,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"xlnet": (
XLNetConfig,
TFXLNetLMHeadModel,
XLNetLMHeadModel,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"xlm": (
XLMConfig,
TFXLMWithLMHeadModel,
XLMWithLMHeadModel,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"xlm-roberta": (
XLMRobertaConfig,
TFXLMRobertaForMaskedLM,
XLMRobertaForMaskedLM,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"transfo-xl": (
TransfoXLConfig,
TFTransfoXLLMHeadModel,
TransfoXLLMHeadModel,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"openai-gpt": (
OpenAIGPTConfig,
TFOpenAIGPTLMHeadModel,
OpenAIGPTLMHeadModel,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"roberta": (
RobertaConfig,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
RobertaForMaskedLM,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"layoutlm": (
LayoutLMConfig,
TFLayoutLMForMaskedLM,
LayoutLMForMaskedLM,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
),
"roberta-large-mnli": (
RobertaConfig,
TFRobertaForSequenceClassification,
RobertaForSequenceClassification,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"camembert": (
CamembertConfig,
TFCamembertForMaskedLM,
CamembertForMaskedLM,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"flaubert": (
FlaubertConfig,
TFFlaubertWithLMHeadModel,
FlaubertWithLMHeadModel,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"distilbert": (
DistilBertConfig,
TFDistilBertForMaskedLM,
DistilBertForMaskedLM,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"distilbert-base-distilled-squad": (
DistilBertConfig,
TFDistilBertForQuestionAnswering,
DistilBertForQuestionAnswering,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"lxmert": (
LxmertConfig,
TFLxmertForPreTraining,
LxmertForPreTraining,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"lxmert-visual-feature-encoder": (
LxmertConfig,
TFLxmertVisualFeatureEncoder,
LxmertVisualFeatureEncoder,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"ctrl": (
CTRLConfig,
TFCTRLLMHeadModel,
CTRLLMHeadModel,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"albert": (
AlbertConfig,
TFAlbertForPreTraining,
AlbertForPreTraining,
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"t5": (
TaConfig,
TFTaForConditionalGeneration,
TaForConditionalGeneration,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"electra": (
ElectraConfig,
TFElectraForPreTraining,
ElectraForPreTraining,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"wav2vec2": (
WavaVecaConfig,
TFWavaVecaModel,
WavaVecaModel,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
}
def convert_pt_checkpoint_to_tf(model_type, pytorch_checkpoint_path, config_file, tf_dump_path, compare_with_pt_model=False, use_cached_models=True):
    '''Convert a single PyTorch checkpoint into a TF 2.0 h5 checkpoint.'''
    if model_type not in MODEL_CLASSES:
        raise ValueError(f"Unrecognized model type, should be one of {list(MODEL_CLASSES.keys())}.")
    config_class, model_class, pt_model_class, aws_config_map = MODEL_CLASSES[model_type]
    # Initialise TF model
    if config_file in aws_config_map:
        config_file = cached_file(config_file, CONFIG_NAME, force_download=not use_cached_models)
    config = config_class.from_json_file(config_file)
    config.output_hidden_states = True
    config.output_attentions = True
    print(f"Building TensorFlow model from configuration: {config}")
    tf_model = model_class(config)
    # Load weights from tf checkpoint
    if pytorch_checkpoint_path in aws_config_map.keys():
        pytorch_checkpoint_path = cached_file(
            pytorch_checkpoint_path, WEIGHTS_NAME, force_download=not use_cached_models
        )
    # Load PyTorch checkpoint in tf2 model:
    tf_model = load_pytorch_checkpoint_in_tfa_model(tf_model, pytorch_checkpoint_path)
    if compare_with_pt_model:
        tfo = tf_model(tf_model.dummy_inputs, training=False)  # build the network
        state_dict = torch.load(pytorch_checkpoint_path, map_location="cpu")
        pt_model = pt_model_class.from_pretrained(
            pretrained_model_name_or_path=None, config=config, state_dict=state_dict
        )
        with torch.no_grad():
            pto = pt_model(**pt_model.dummy_inputs)
        np_pt = pto[0].numpy()
        np_tf = tfo[0].numpy()
        diff = np.amax(np.abs(np_pt - np_tf))
        print(f"Max absolute difference between models outputs {diff}")
        assert diff <= 2e-2, f"Error, model absolute difference is >2e-2: {diff}"
    # Save pytorch-model
    print(f"Save TensorFlow model to {tf_dump_path}")
    tf_model.save_weights(tf_dump_path, save_format="h5")
def convert_all_pt_checkpoints_to_tf(args_model_type, tf_dump_path, model_shortcut_names_or_path=None, config_shortcut_names_or_path=None, compare_with_pt_model=False, use_cached_models=False, remove_cached_files=False, only_convert_finetuned_models=False):
    '''Convert a batch of PyTorch checkpoints (or every known shortcut) to TF 2.0.'''
    if args_model_type is None:
        model_types = list(MODEL_CLASSES.keys())
    else:
        model_types = [args_model_type]
    for j, model_type in enumerate(model_types, start=1):
        print("=" * 100)
        print(f" Converting model type {j}/{len(model_types)}: {model_type}")
        print("=" * 100)
        if model_type not in MODEL_CLASSES:
            raise ValueError(f"Unrecognized model type {model_type}, should be one of {list(MODEL_CLASSES.keys())}.")
        config_class, model_class, pt_model_class, aws_model_maps, aws_config_map = MODEL_CLASSES[model_type]
        if model_shortcut_names_or_path is None:
            model_shortcut_names_or_path = list(aws_model_maps.keys())
        if config_shortcut_names_or_path is None:
            config_shortcut_names_or_path = model_shortcut_names_or_path
        for i, (model_shortcut_name, config_shortcut_name) in enumerate(
            zip(model_shortcut_names_or_path, config_shortcut_names_or_path), start=1
        ):
            print("-" * 100)
            if "-squad" in model_shortcut_name or "-mrpc" in model_shortcut_name or "-mnli" in model_shortcut_name:
                if not only_convert_finetuned_models:
                    print(f"    Skipping finetuned checkpoint {model_shortcut_name}")
                    continue
                model_type = model_shortcut_name
            elif only_convert_finetuned_models:
                print(f"    Skipping not finetuned checkpoint {model_shortcut_name}")
                continue
            print(
                f"    Converting checkpoint {i}/{len(model_shortcut_names_or_path)}: {model_shortcut_name} - model_type {model_type}"
            )
            print("-" * 100)
            if config_shortcut_name in aws_config_map:
                config_file = cached_file(config_shortcut_name, CONFIG_NAME, force_download=not use_cached_models)
            else:
                config_file = config_shortcut_name
            if model_shortcut_name in aws_model_maps:
                model_file = cached_file(model_shortcut_name, WEIGHTS_NAME, force_download=not use_cached_models)
            else:
                model_file = model_shortcut_name
            if os.path.isfile(model_shortcut_name):
                model_shortcut_name = "converted_model"
            convert_pt_checkpoint_to_tf(
                model_type=model_type,
                pytorch_checkpoint_path=model_file,
                config_file=config_file,
                tf_dump_path=os.path.join(tf_dump_path, model_shortcut_name + "-tf_model.h5"),
                compare_with_pt_model=compare_with_pt_model,
            )
            if remove_cached_files:
                os.remove(config_file)
                os.remove(model_file)
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_dump_path", default=None, type=str, required=True, help="Path to the output Tensorflow dump file."
)
parser.add_argument(
"--model_type",
default=None,
type=str,
help=(
F'''Model type selected in the list of {list(MODEL_CLASSES.keys())}. If not given, will download and '''
"convert all the models from AWS."
),
)
parser.add_argument(
"--pytorch_checkpoint_path",
default=None,
type=str,
help=(
"Path to the PyTorch checkpoint path or shortcut name to download from AWS. "
"If not given, will download and convert all the checkpoints from AWS."
),
)
parser.add_argument(
"--config_file",
default=None,
type=str,
help=(
"The config json file corresponding to the pre-trained model. \n"
"This specifies the model architecture. If not given and "
"--pytorch_checkpoint_path is not given or is a shortcut name "
"use the configuration associated to the shortcut name on the AWS"
),
)
parser.add_argument(
"--compare_with_pt_model", action="store_true", help="Compare Tensorflow and PyTorch model predictions."
)
parser.add_argument(
"--use_cached_models",
action="store_true",
help="Use cached models if possible instead of updating to latest checkpoint versions.",
)
parser.add_argument(
"--remove_cached_files",
action="store_true",
help="Remove pytorch models after conversion (save memory when converting in batches).",
)
parser.add_argument("--only_convert_finetuned_models", action="store_true", help="Only convert finetuned models.")
lowercase_ = parser.parse_args()
# if args.pytorch_checkpoint_path is not None:
# convert_pt_checkpoint_to_tf(args.model_type.lower(),
# args.pytorch_checkpoint_path,
# args.config_file if args.config_file is not None else args.pytorch_checkpoint_path,
# args.tf_dump_path,
# compare_with_pt_model=args.compare_with_pt_model,
# use_cached_models=args.use_cached_models)
# else:
convert_all_pt_checkpoints_to_tf(
args.model_type.lower() if args.model_type is not None else None,
args.tf_dump_path,
model_shortcut_names_or_path=[args.pytorch_checkpoint_path]
if args.pytorch_checkpoint_path is not None
else None,
config_shortcut_names_or_path=[args.config_file] if args.config_file is not None else None,
compare_with_pt_model=args.compare_with_pt_model,
use_cached_models=args.use_cached_models,
remove_cached_files=args.remove_cached_files,
only_convert_finetuned_models=args.only_convert_finetuned_models,
)
| 20 | 0 |
import os
import shutil
from pathlib import Path
from typing import Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging
if is_onnx_available():
import onnxruntime as ort
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"tensor(bool)": np.bool_,
"tensor(int8)": np.inta,
"tensor(uint8)": np.uinta,
"tensor(int16)": np.intaa,
"tensor(uint16)": np.uintaa,
"tensor(int32)": np.intaa,
"tensor(uint32)": np.uintaa,
"tensor(int64)": np.intaa,
"tensor(uint64)": np.uintaa,
"tensor(float16)": np.floataa,
"tensor(float)": np.floataa,
"tensor(double)": np.floataa,
}
class OnnxRuntimeModel:
    def __init__(self, model=None, **kwargs):
        logger.info("`diffusers.OnnxRuntimeModel` is experimental and might change in the future.")
        self.model = model
        self.model_save_dir = kwargs.get("model_save_dir", None)
        self.latest_model_name = kwargs.get("latest_model_name", ONNX_WEIGHTS_NAME)

    def __call__(self, **kwargs):
        inputs = {k: np.array(v) for k, v in kwargs.items()}
        return self.model.run(None, inputs)
    @staticmethod
    def load_model(path: Union[str, Path], provider=None, sess_options=None):
        '''Load an ONNX inference session with a given execution provider.'''
        if provider is None:
            logger.info("No onnxruntime provider specified, using CPUExecutionProvider")
            provider = "CPUExecutionProvider"
        return ort.InferenceSession(path, providers=[provider], sess_options=sess_options)

    def _save_pretrained(self, save_directory: Union[str, Path], file_name: Optional[str] = None, **kwargs):
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        src_path = self.model_save_dir.joinpath(self.latest_model_name)
        dst_path = Path(save_directory).joinpath(model_file_name)
        try:
            shutil.copyfile(src_path, dst_path)
        except shutil.SameFileError:
            pass
        # copy external weights (for models >2GB)
        src_path = self.model_save_dir.joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
        if src_path.exists():
            dst_path = Path(save_directory).joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
            try:
                shutil.copyfile(src_path, dst_path)
            except shutil.SameFileError:
                pass
    def save_pretrained(self, save_directory: Union[str, os.PathLike], **kwargs):
        if os.path.isfile(save_directory):
            logger.error(f"Provided path ({save_directory}) should be a directory, not a file")
            return
        os.makedirs(save_directory, exist_ok=True)
        # saving model weights/files
        self._save_pretrained(save_directory, **kwargs)

    @classmethod
    def _from_pretrained(cls, model_id: Union[str, Path], use_auth_token: Optional[Union[bool, str, None]] = None, revision: Optional[Union[str, None]] = None, force_download: bool = False, cache_dir: Optional[str] = None, file_name: Optional[str] = None, provider: Optional[str] = None, sess_options: Optional["ort.SessionOptions"] = None, **kwargs):
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        # load model from local directory
        if os.path.isdir(model_id):
            model = OnnxRuntimeModel.load_model(
                os.path.join(model_id, model_file_name), provider=provider, sess_options=sess_options
            )
            kwargs["model_save_dir"] = Path(model_id)
        # load model from hub
        else:
            # download model
            model_cache_path = hf_hub_download(
                repo_id=model_id, filename=model_file_name, use_auth_token=use_auth_token, revision=revision, cache_dir=cache_dir, force_download=force_download,
            )
            kwargs["model_save_dir"] = Path(model_cache_path).parent
            kwargs["latest_model_name"] = Path(model_cache_path).name
            model = OnnxRuntimeModel.load_model(model_cache_path, provider=provider, sess_options=sess_options)
        return cls(model=model, **kwargs)

    @classmethod
    def from_pretrained(cls, model_id: Union[str, Path], force_download: bool = True, use_auth_token: Optional[str] = None, cache_dir: Optional[str] = None, **model_kwargs):
        revision = None
        if len(str(model_id).split("@")) == 2:
            model_id, revision = model_id.split("@")
        return cls._from_pretrained(
            model_id=model_id, revision=revision, cache_dir=cache_dir, force_download=force_download, use_auth_token=use_auth_token, **model_kwargs,
        )
| 350 | import random
def _partition(data: list, pivot) -> tuple:
    '''Three-way partition of data around pivot.'''
    less, equal, greater = [], [], []
    for element in data:
        if element < pivot:
            less.append(element)
        elif element > pivot:
            greater.append(element)
        else:
            equal.append(element)
    return less, equal, greater


def quick_select(items: list, index: int):
    '''Return the index-th smallest element of items (0-based), or None.'''
    # index = len(items) // 2 when trying to find the median
    # (value of index when items is sorted)

    # invalid input
    if index >= len(items) or index < 0:
        return None

    pivot = items[random.randint(0, len(items) - 1)]
    count = 0
    smaller, equal, larger = _partition(items, pivot)
    count = len(equal)
    m = len(smaller)

    # index is the pivot
    if m <= index < m + count:
        return pivot
    # must be in smaller
    elif m > index:
        return quick_select(smaller, index)
    # must be in larger
    else:
        return quick_select(larger, index - (m + count))
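# Usage sketch (added; not in the original file): the k-th smallest element in
# expected linear time; index len(data) // 2 selects the median.
def _demo_quick_select():
    data = [2, 4, 5, 7, 899, 54, 32]
    assert quick_select(data, len(data) // 2) == 7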
| 20 | 0 |
from __future__ import annotations
def CeilIndex(v, l, r, key):  # noqa: E741
    '''Binary search for the smallest index in v[l:r+1] whose value is >= key.'''
    while r - l > 1:
        m = (l + r) // 2
        if v[m] >= key:
            r = m
        else:
            l = m  # noqa: E741
    return r


def LongestIncreasingSubsequenceLength(v: list[int]) -> int:
    '''O(n log n) length of the longest strictly increasing subsequence.'''
    if len(v) == 0:
        return 0
    tail = [0] * len(v)
    length = 1
    tail[0] = v[0]
    for i in range(1, len(v)):
        if v[i] < tail[0]:
            tail[0] = v[i]
        elif v[i] > tail[length - 1]:
            tail[length] = v[i]
            length += 1
        else:
            tail[CeilIndex(tail, -1, length - 1, v[i])] = v[i]
    return length
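# Worked example (added; not in the original file): tail[] keeps the smallest possible
# tail value for each subsequence length; [2, 5, 3, 7, 11, 8, 10, 13, 6] admits the
# increasing subsequence 2, 3, 7, 8, 10, 13 of length 6.
def _demo_lis_length():
    assert LongestIncreasingSubsequenceLength([2, 5, 3, 7, 11, 8, 10, 13, 6]) == 6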
if __name__ == "__main__":
import doctest
doctest.testmod()
| 351 | import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer
lowercase_ = logging.get_logger(__name__)
lowercase_ = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
lowercase_ = {
"vocab_file": {
"junnyu/roformer_chinese_small": "https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt",
"junnyu/roformer_chinese_base": "https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt",
"junnyu/roformer_chinese_char_small": (
"https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt"
),
"junnyu/roformer_chinese_char_base": (
"https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt"
),
"junnyu/roformer_small_discriminator": (
"https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt"
),
"junnyu/roformer_small_generator": (
"https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt"
),
}
}
lowercase_ = {
"junnyu/roformer_chinese_small": 15_36,
"junnyu/roformer_chinese_base": 15_36,
"junnyu/roformer_chinese_char_small": 5_12,
"junnyu/roformer_chinese_char_base": 5_12,
"junnyu/roformer_small_discriminator": 1_28,
"junnyu/roformer_small_generator": 1_28,
}
lowercase_ = {
"junnyu/roformer_chinese_small": {"do_lower_case": True},
"junnyu/roformer_chinese_base": {"do_lower_case": True},
"junnyu/roformer_chinese_char_small": {"do_lower_case": True},
"junnyu/roformer_chinese_char_base": {"do_lower_case": True},
"junnyu/roformer_small_discriminator": {"do_lower_case": True},
"junnyu/roformer_small_generator": {"do_lower_case": True},
}
class RoFormerTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RoFormerTokenizer
    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs)
        pre_tok_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            pre_tok_state.get("lowercase", do_lower_case) != do_lower_case
            or pre_tok_state.get("strip_accents", strip_accents) != strip_accents
        ):
            pre_tok_class = getattr(normalizers, pre_tok_state.pop("type"))
            pre_tok_state["lowercase"] = do_lower_case
            pre_tok_state["strip_accents"] = strip_accents
            self.backend_tokenizer.normalizer = pre_tok_class(**pre_tok_state)
        self.do_lower_case = do_lower_case
def __getstate__( self : Optional[Any] ):
        state = self.__dict__.copy()
        state["_tokenizer"].pre_tokenizer = BertPreTokenizer()
return state
    def __setstate__(self, d):
        self.__dict__ = d
        vocab = self.__dict__["_tokenizer"].get_vocab()
        self.__dict__["_tokenizer"].pre_tokenizer = PreTokenizer.custom(JiebaPreTokenizer(vocab))
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
    def save_pretrained(self, save_directory, legacy_format=None, filename_prefix=None, push_to_hub=False, **kwargs):
        self.backend_tokenizer.pre_tokenizer = BertPreTokenizer()
        return super().save_pretrained(save_directory, legacy_format, filename_prefix, push_to_hub, **kwargs)
| 20 | 0 |
import argparse
import json
import os
from tensorflow.core.protobuf.saved_model_pba import SavedModel
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
lowercase_ = "."
# Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model)
INTERNAL_OPS = [
"Assert",
"AssignVariableOp",
"EmptyTensorList",
"MergeV2Checkpoints",
"ReadVariableOp",
"ResourceGather",
"RestoreV2",
"SaveV2",
"ShardedFilename",
"StatefulPartitionedCall",
"StaticRegexFullMatch",
"VarHandleOp",
]
def onnx_compliancy(saved_model_path, strict, opset):
    '''Check that every op in a TF SavedModel is supported by the target ONNX opset.'''
    saved_model = SavedModel()
    onnx_ops = []

    with open(os.path.join(REPO_PATH, "utils", "tf_ops", "onnx.json")) as f:
        onnx_opsets = json.load(f)["opsets"]

    for i in range(1, opset + 1):
        onnx_ops.extend(onnx_opsets[str(i)])

    with open(saved_model_path, "rb") as f:
        saved_model.ParseFromString(f.read())

    model_op_names = set()

    # Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs)
    for meta_graph in saved_model.meta_graphs:
        # Add operations in the graph definition
        model_op_names.update(node.op for node in meta_graph.graph_def.node)
        # Go through the functions in the graph definition
        for func in meta_graph.graph_def.library.function:
            # Add operations in each function
            model_op_names.update(node.op for node in func.node_def)

    # Convert to list, sorted if you want
    model_op_names = sorted(model_op_names)
    incompatible_ops = []
    for op in model_op_names:
        if op not in onnx_ops and op not in INTERNAL_OPS:
            incompatible_ops.append(op)

    if strict and len(incompatible_ops) > 0:
        raise Exception(f"Found the following incompatible ops for the opset {opset}:\n" + "\n".join(incompatible_ops))
    elif len(incompatible_ops) > 0:
        print(f"Found the following incompatible ops for the opset {opset}:")
        print(*incompatible_ops, sep="\n")
    else:
        print(f"The saved model {saved_model_path} can properly be converted with ONNX.")
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser()
parser.add_argument("--saved_model_path", help="Path of the saved model to check (the .pb file).")
parser.add_argument(
"--opset", default=12, type=int, help="The ONNX opset against which the model has to be tested."
)
parser.add_argument(
"--framework", choices=["onnx"], default="onnx", help="Frameworks against which to test the saved model."
)
parser.add_argument(
"--strict", action="store_true", help="Whether make the checking strict (raise errors) or not (raise warnings)"
)
lowercase_ = parser.parse_args()
if args.framework == "onnx":
onnx_compliancy(args.saved_model_path, args.strict, args.opset)
| 352 | from __future__ import annotations
import math
def minimax(depth: int, node_index: int, is_max: bool, scores: list[int], height: float) -> int:
    '''Evaluate a perfect binary game tree with alternating max/min levels.'''
    if depth < 0:
        raise ValueError("Depth cannot be less than 0")
    if len(scores) == 0:
        raise ValueError("Scores cannot be empty")
    if depth == height:
        return scores[node_index]
    if is_max:
        return max(
            minimax(depth + 1, node_index * 2, False, scores, height),
            minimax(depth + 1, node_index * 2 + 1, False, scores, height),
        )
    return min(
        minimax(depth + 1, node_index * 2, True, scores, height),
        minimax(depth + 1, node_index * 2 + 1, True, scores, height),
    )


def main() -> None:
    scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    height = math.log(len(scores), 2)
    print("Optimal value : ", end="")
    print(minimax(0, 0, True, scores, height))
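# Worked example (added; not in the original file): the maximiser moves first, the
# minimiser replies, so the pairwise maxima [90, 33, 65, 34423] are min-ed to
# [33, 65] and the root takes max(33, 65) = 65.
def _demo_minimax():
    assert minimax(0, 0, True, [90, 23, 6, 33, 21, 65, 123, 34423], 3) == 65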
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 20 | 0 |
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
lowercase_ = sys.version_info >= (3, 10)
def list_field ( default=None , metadata=None ):
    '''simple docstring'''
    return field(default_factory=lambda: default , metadata=metadata )
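# Why the lambda above: dataclasses rejects a bare mutable default (e.g. `xs: List[int] = []`
# raises "mutable default ... is not allowed"), but accepts a default_factory. Note that this
# particular factory returns the *same* list object on every call, so the shared default
# should be treated as read-only. Comment-only sketch (the _Sketch name is illustrative):
#
#     @dataclass
#     class _Sketch:
#         xs: List[int] = list_field(default=[1, 2, 3])
#
#     _Sketch().xs  # -> [1, 2, 3]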
@dataclass
class BasicExample :
    foo : int
    bar : float
    baz : str
    flag : bool
@dataclass
class WithDefaultExample :
    foo : int = 42
    baz : str = field(default="toto" , metadata={"help": "help message"} )
@dataclass
class WithDefaultBoolExample :
    foo : bool = False
    baz : bool = True
    opt : Optional[bool] = None
class BasicEnum ( Enum ):
    titi = "titi"
    toto = "toto"
class MixedTypeEnum ( Enum ):
    titi = "titi"
    toto = "toto"
    fourtytwo = 42
@dataclass
class EnumExample :
    foo : BasicEnum = "toto"
    def __post_init__( self : Any ):
        self.foo = BasicEnum(self.foo )
@dataclass
class MixedTypeEnumExample :
    foo : MixedTypeEnum = "toto"
    def __post_init__( self : Optional[Any] ):
        self.foo = MixedTypeEnum(self.foo )
@dataclass
class OptionalExample :
    foo : Optional[int] = None
    bar : Optional[float] = field(default=None , metadata={"help": "help message"} )
    baz : Optional[str] = None
    ces : Optional[List[str]] = list_field(default=[] )
    des : Optional[List[int]] = list_field(default=[] )
@dataclass
class ListExample :
    foo_int : List[int] = list_field(default=[] )
    bar_int : List[int] = list_field(default=[1, 2, 3] )
    foo_str : List[str] = list_field(default=["Hallo", "Bonjour", "Hello"] )
    foo_float : List[float] = list_field(default=[0.1, 0.2, 0.3] )
@dataclass
class RequiredExample :
    required_list : List[int] = field()
    required_str : str = field()
    required_enum : BasicEnum = field()
    def __post_init__( self : Optional[Any] ):
        self.required_enum = BasicEnum(self.required_enum )
@dataclass
class StringLiteralAnnotationExample :
    foo : int
    required_enum : "BasicEnum" = field()
    opt : "Optional[bool]" = None
    baz : "str" = field(default="toto" , metadata={"help": "help message"} )
    foo_str : "List[str]" = list_field(default=["Hallo", "Bonjour", "Hello"] )
if is_python_no_less_than_3_10:
@dataclass
    class WithDefaultBoolExamplePt310 :
        foo : bool = False
        baz : bool = True
        opt : bool | None = None
@dataclass
    class OptionalExamplePt310 :
        foo : int | None = None
        bar : float | None = field(default=None , metadata={"help": "help message"} )
        baz : str | None = None
        ces : list[str] | None = list_field(default=[] )
        des : list[int] | None = list_field(default=[] )
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
    def argparsersEqual( self : Dict , a : argparse.ArgumentParser , b : argparse.ArgumentParser ):
        self.assertEqual(len(a._actions ) , len(b._actions ) )
        for x, y in zip(a._actions , b._actions ):
            xx = {k: v for k, v in vars(x ).items() if k != """container"""}
            yy = {k: v for k, v in vars(y ).items() if k != """container"""}
            # Choices with mixed type have custom function as "type"
            # So we need to compare results directly for equality
            if xx.get("""choices""" , None ) and yy.get("""choices""" , None ):
                for expected_choice in yy["choices"] + xx["choices"]:
                    self.assertEqual(xx["""type"""](expected_choice ) , yy["""type"""](expected_choice ) )
                del xx["type"], yy["type"]
            self.assertEqual(xx , yy )
def snake_case__ ( self : Dict ):
        parser = HfArgumentParser(BasicExample )
        expected = argparse.ArgumentParser()
        expected.add_argument("""--foo""" , type=int , required=True )
        expected.add_argument("""--bar""" , type=float , required=True )
        expected.add_argument("""--baz""" , type=str , required=True )
        expected.add_argument("""--flag""" , type=string_to_bool , default=False , const=True , nargs="""?""" )
        self.argparsersEqual(parser , expected )
        args = ["""--foo""", """1""", """--baz""", """quux""", """--bar""", """0.5"""]
        (example,) = parser.parse_args_into_dataclasses(args , look_for_args_file=False )
        self.assertFalse(example.flag )
def snake_case__ ( self : List[str] ):
        parser = HfArgumentParser(WithDefaultExample )
        expected = argparse.ArgumentParser()
        expected.add_argument("""--foo""" , default=42 , type=int )
        expected.add_argument("""--baz""" , default="""toto""" , type=str , help="""help message""" )
        self.argparsersEqual(parser , expected )
def snake_case__ ( self : Optional[Any] ):
        expected = argparse.ArgumentParser()
        expected.add_argument("""--foo""" , type=string_to_bool , default=False , const=True , nargs="""?""" )
        expected.add_argument("""--baz""" , type=string_to_bool , default=True , const=True , nargs="""?""" )
        # A boolean no_* argument always has to come after its "default: True" regular counter-part
        # and its default must be set to False
        expected.add_argument("""--no_baz""" , action="""store_false""" , default=False , dest="""baz""" )
        expected.add_argument("""--opt""" , type=string_to_bool , default=None )
        dataclass_types = [WithDefaultBoolExample]
        if is_python_no_less_than_3_10:
            dataclass_types.append(WithDefaultBoolExamplePt310 )
        for dataclass_type in dataclass_types:
            parser = HfArgumentParser(dataclass_type )
            self.argparsersEqual(parser , expected )
            args = parser.parse_args([] )
            self.assertEqual(args , Namespace(foo=False , baz=True , opt=None ) )
            args = parser.parse_args(["""--foo""", """--no_baz"""] )
            self.assertEqual(args , Namespace(foo=True , baz=False , opt=None ) )
            args = parser.parse_args(["""--foo""", """--baz"""] )
            self.assertEqual(args , Namespace(foo=True , baz=True , opt=None ) )
            args = parser.parse_args(["""--foo""", """True""", """--baz""", """True""", """--opt""", """True"""] )
            self.assertEqual(args , Namespace(foo=True , baz=True , opt=True ) )
            args = parser.parse_args(["""--foo""", """False""", """--baz""", """False""", """--opt""", """False"""] )
            self.assertEqual(args , Namespace(foo=False , baz=False , opt=False ) )
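    # The flag convention exercised above, summarised as a comment-only sketch of the
    # expected parser's behaviour:
    #
    #     parser.parse_args([])                     # -> foo=False, baz=True,  opt=None
    #     parser.parse_args(["--foo", "--no_baz"])  # -> foo=True,  baz=False, opt=None
    #     parser.parse_args(["--foo", "False"])     # -> foo=False (string_to_bool coerces it)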
def snake_case__ ( self : List[str] ):
        parser = HfArgumentParser(MixedTypeEnumExample )
        expected = argparse.ArgumentParser()
        expected.add_argument(
            """--foo""" , default="""toto""" , choices=["""titi""", """toto""", 42] , type=make_choice_type_function(["""titi""", """toto""", 42] ) , )
        self.argparsersEqual(parser , expected )
        args = parser.parse_args([] )
        self.assertEqual(args.foo , """toto""" )
        enum_ex = parser.parse_args_into_dataclasses([] )[0]
        self.assertEqual(enum_ex.foo , MixedTypeEnum.toto )
        args = parser.parse_args(["""--foo""", """titi"""] )
        self.assertEqual(args.foo , """titi""" )
        enum_ex = parser.parse_args_into_dataclasses(["""--foo""", """titi"""] )[0]
        self.assertEqual(enum_ex.foo , MixedTypeEnum.titi )
        args = parser.parse_args(["""--foo""", """42"""] )
        self.assertEqual(args.foo , 42 )
        enum_ex = parser.parse_args_into_dataclasses(["""--foo""", """42"""] )[0]
        self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo )
def snake_case__ ( self : Dict ):
@dataclass
        class LiteralExample :
            foo : Literal["titi", "toto", 42] = "toto"
        parser = HfArgumentParser(LiteralExample )
        expected = argparse.ArgumentParser()
        expected.add_argument(
            """--foo""" , default="""toto""" , choices=("""titi""", """toto""", 42) , type=make_choice_type_function(["""titi""", """toto""", 42] ) , )
        self.argparsersEqual(parser , expected )
        args = parser.parse_args([] )
        self.assertEqual(args.foo , """toto""" )
        args = parser.parse_args(["""--foo""", """titi"""] )
        self.assertEqual(args.foo , """titi""" )
        args = parser.parse_args(["""--foo""", """42"""] )
        self.assertEqual(args.foo , 42 )
def snake_case__ ( self : Dict ):
        parser = HfArgumentParser(ListExample )
        expected = argparse.ArgumentParser()
        expected.add_argument("""--foo_int""" , nargs="""+""" , default=[] , type=int )
        expected.add_argument("""--bar_int""" , nargs="""+""" , default=[1, 2, 3] , type=int )
        expected.add_argument("""--foo_str""" , nargs="""+""" , default=["""Hallo""", """Bonjour""", """Hello"""] , type=str )
        expected.add_argument("""--foo_float""" , nargs="""+""" , default=[0.1, 0.2, 0.3] , type=float )
        self.argparsersEqual(parser , expected )
        args = parser.parse_args([] )
        self.assertEqual(
            args , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=["""Hallo""", """Bonjour""", """Hello"""] , foo_float=[0.1, 0.2, 0.3] ) , )
        args = parser.parse_args("""--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7""".split() )
        self.assertEqual(args , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=["""a""", """b""", """c"""] , foo_float=[0.1, 0.7] ) )
def snake_case__ ( self : str ):
        expected = argparse.ArgumentParser()
        expected.add_argument("""--foo""" , default=None , type=int )
        expected.add_argument("""--bar""" , default=None , type=float , help="""help message""" )
        expected.add_argument("""--baz""" , default=None , type=str )
        expected.add_argument("""--ces""" , nargs="""+""" , default=[] , type=str )
        expected.add_argument("""--des""" , nargs="""+""" , default=[] , type=int )
        dataclass_types = [OptionalExample]
        if is_python_no_less_than_3_10:
            dataclass_types.append(OptionalExamplePt310 )
        for dataclass_type in dataclass_types:
            parser = HfArgumentParser(dataclass_type )
            self.argparsersEqual(parser , expected )
            args = parser.parse_args([] )
            self.assertEqual(args , Namespace(foo=None , bar=None , baz=None , ces=[] , des=[] ) )
            args = parser.parse_args("""--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3""".split() )
            self.assertEqual(args , Namespace(foo=12 , bar=3.14 , baz="""42""" , ces=["""a""", """b""", """c"""] , des=[1, 2, 3] ) )
def snake_case__ ( self : Union[str, Any] ):
        parser = HfArgumentParser(RequiredExample )
        expected = argparse.ArgumentParser()
        expected.add_argument("""--required_list""" , nargs="""+""" , type=int , required=True )
        expected.add_argument("""--required_str""" , type=str , required=True )
        expected.add_argument(
            """--required_enum""" , type=make_choice_type_function(["""titi""", """toto"""] ) , choices=["""titi""", """toto"""] , required=True , )
        self.argparsersEqual(parser , expected )
def snake_case__ ( self : str ):
        parser = HfArgumentParser(StringLiteralAnnotationExample )
        expected = argparse.ArgumentParser()
        expected.add_argument("""--foo""" , type=int , required=True )
        expected.add_argument(
            """--required_enum""" , type=make_choice_type_function(["""titi""", """toto"""] ) , choices=["""titi""", """toto"""] , required=True , )
        expected.add_argument("""--opt""" , type=string_to_bool , default=None )
        expected.add_argument("""--baz""" , default="""toto""" , type=str , help="""help message""" )
        expected.add_argument("""--foo_str""" , nargs="""+""" , default=["""Hallo""", """Bonjour""", """Hello"""] , type=str )
        self.argparsersEqual(parser , expected )
def snake_case__ ( self : Optional[int] ):
        parser = HfArgumentParser(BasicExample )
        args_dict = {
            """foo""": 12,
            """bar""": 3.14,
            """baz""": """42""",
            """flag""": True,
        }
        parsed_args = parser.parse_dict(args_dict )[0]
        args = BasicExample(**args_dict )
        self.assertEqual(parsed_args , args )
def snake_case__ ( self : Optional[int] ):
        parser = HfArgumentParser(BasicExample )
        args_dict = {
            """foo""": 12,
            """bar""": 3.14,
            """baz""": """42""",
            """flag""": True,
            """extra""": 42,
        }
        self.assertRaises(ValueError , parser.parse_dict , args_dict , allow_extra_keys=False )
def snake_case__ ( self : Any ):
        parser = HfArgumentParser(BasicExample )
        args_dict = {
            """foo""": 12,
            """bar""": 3.14,
            """baz""": """42""",
            """flag""": True,
        }
        with tempfile.TemporaryDirectory() as tmp_dir:
            temp_local_path = os.path.join(tmp_dir , """temp_json""" )
            os.mkdir(temp_local_path )
            with open(temp_local_path + """.json""" , """w+""" ) as f:
                json.dump(args_dict , f )
            parsed_args = parser.parse_json_file(Path(temp_local_path + """.json""" ) )[0]
            args = BasicExample(**args_dict )
            self.assertEqual(parsed_args , args )
def snake_case__ ( self : Tuple ):
        parser = HfArgumentParser(BasicExample )
        args_dict = {
            """foo""": 12,
            """bar""": 3.14,
            """baz""": """42""",
            """flag""": True,
        }
        with tempfile.TemporaryDirectory() as tmp_dir:
            temp_local_path = os.path.join(tmp_dir , """temp_yaml""" )
            os.mkdir(temp_local_path )
            with open(temp_local_path + """.yaml""" , """w+""" ) as f:
                yaml.dump(args_dict , f )
            parsed_args = parser.parse_yaml_file(Path(temp_local_path + """.yaml""" ) )[0]
            args = BasicExample(**args_dict )
            self.assertEqual(parsed_args , args )
def snake_case__ ( self : Tuple ):
        parser = HfArgumentParser(TrainingArguments )
        self.assertIsNotNone(parser )
| 353 | import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
import PIL.Image
from .features import FeatureType
lowercase_ = None
lowercase_ = "<" if sys.byteorder == "little" else ">"
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image
lowercase_ = [
np.dtype("|b1"),
np.dtype("|u1"),
np.dtype("<u2"),
np.dtype(">u2"),
np.dtype("<i2"),
np.dtype(">i2"),
np.dtype("<u4"),
np.dtype(">u4"),
np.dtype("<i4"),
np.dtype(">i4"),
np.dtype("<f4"),
np.dtype(">f4"),
np.dtype("<f8"),
np.dtype(">f8"),
]
@dataclass
class Image :
    decode : bool = True
    id : Optional[str] = None
    # Automatically constructed
    dtype : ClassVar[str] = "PIL.Image.Image"
    pa_type : ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()} )
    _type : str = field(default="Image" , init=False , repr=False )
def __call__( self : Any ):
return self.pa_type
    def encode_example( self : List[Any] , value : Union[str, bytes, dict, np.ndarray, "PIL.Image.Image"] ):
        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError("""To support encoding images, please install 'Pillow'.""" )
        if isinstance(value , list ):
            value = np.array(value )
        if isinstance(value , str ):
            return {"path": value, "bytes": None}
        elif isinstance(value , bytes ):
            return {"path": None, "bytes": value}
        elif isinstance(value , np.ndarray ):
            # convert the image array to PNG/TIFF bytes
            return encode_np_array(value )
        elif isinstance(value , PIL.Image.Image ):
            # convert the PIL image to bytes (default format is PNG/TIFF)
            return encode_pil_image(value )
elif value.get("""path""" ) is not None and os.path.isfile(value["""path"""] ):
# we set "bytes": None to not duplicate the data if they're already available locally
return {"bytes": None, "path": value.get("""path""" )}
elif value.get("""bytes""" ) is not None or value.get("""path""" ) is not None:
# store the image bytes, and path is used to infer the image format using the file extension
return {"bytes": value.get("""bytes""" ), "path": value.get("""path""" )}
else:
raise ValueError(
f'''An image sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.''' )
    def decode_example( self : List[str] , value : dict , token_per_repo_id : Optional[dict]=None ):
        if not self.decode:
            raise RuntimeError("""Decoding is disabled for this feature. Please use Image(decode=True) instead.""" )
        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError("""To support decoding images, please install 'Pillow'.""" )
        if token_per_repo_id is None:
            token_per_repo_id = {}
        path , bytes_ = value["""path"""], value["""bytes"""]
        if bytes_ is None:
            if path is None:
                raise ValueError(f'''An image should have one of \'path\' or \'bytes\' but both are None in {value}.''' )
            else:
                if is_local_path(path ):
                    image = PIL.Image.open(path )
                else:
                    source_url = path.split("""::""" )[-1]
                    try:
                        repo_id = string_to_dict(source_url , config.HUB_DATASETS_URL )["""repo_id"""]
                        use_auth_token = token_per_repo_id.get(repo_id )
                    except ValueError:
                        use_auth_token = None
                    with xopen(path , """rb""" , use_auth_token=use_auth_token ) as f:
                        bytes_ = BytesIO(f.read() )
                    image = PIL.Image.open(bytes_ )
        else:
            image = PIL.Image.open(BytesIO(bytes_ ) )
        image.load()  # to avoid "Too many open files" errors
        return image
    def flatten( self : Union[str, Any] ):
from .features import Value
return (
self
if self.decode
else {
"bytes": Value("""binary""" ),
"path": Value("""string""" ),
}
)
    def cast_storage( self : Optional[int] , storage : Union[pa.StringArray, pa.StructArray, pa.ListArray] ):
        if pa.types.is_string(storage.type ):
            bytes_array = pa.array([None] * len(storage ) , type=pa.binary() )
            storage = pa.StructArray.from_arrays([bytes_array, storage] , ["""bytes""", """path"""] , mask=storage.is_null() )
        elif pa.types.is_binary(storage.type ):
            path_array = pa.array([None] * len(storage ) , type=pa.string() )
            storage = pa.StructArray.from_arrays([storage, path_array] , ["""bytes""", """path"""] , mask=storage.is_null() )
        elif pa.types.is_struct(storage.type ):
            if storage.type.get_field_index("""bytes""" ) >= 0:
                bytes_array = storage.field("""bytes""" )
            else:
                bytes_array = pa.array([None] * len(storage ) , type=pa.binary() )
            if storage.type.get_field_index("""path""" ) >= 0:
                path_array = storage.field("""path""" )
            else:
                path_array = pa.array([None] * len(storage ) , type=pa.string() )
            storage = pa.StructArray.from_arrays([bytes_array, path_array] , ["""bytes""", """path"""] , mask=storage.is_null() )
        elif pa.types.is_list(storage.type ):
            bytes_array = pa.array(
                [encode_np_array(np.array(arr ) )["""bytes"""] if arr is not None else None for arr in storage.to_pylist()] , type=pa.binary() , )
            path_array = pa.array([None] * len(storage ) , type=pa.string() )
            storage = pa.StructArray.from_arrays(
                [bytes_array, path_array] , ["""bytes""", """path"""] , mask=bytes_array.is_null() )
        return array_cast(storage , self.pa_type )
    def embed_storage( self : Union[str, Any] , storage : pa.StructArray ):
        @no_op_if_value_is_null
        def path_to_bytes(path : str ):
            with xopen(path , """rb""" ) as f:
                bytes_ = f.read()
            return bytes_
        bytes_array = pa.array(
            [
                (path_to_bytes(x["""path"""] ) if x["""bytes"""] is None else x["""bytes"""]) if x is not None else None
                for x in storage.to_pylist()
            ] , type=pa.binary() , )
        path_array = pa.array(
            [os.path.basename(path ) if path is not None else None for path in storage.field("""path""" ).to_pylist()] , type=pa.string() , )
        storage = pa.StructArray.from_arrays([bytes_array, path_array] , ["""bytes""", """path"""] , mask=bytes_array.is_null() )
        return array_cast(storage , self.pa_type )
def list_image_compression_formats ( ):
    '''simple docstring'''
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("""To support encoding images, please install 'Pillow'.""" )
    global _IMAGE_COMPRESSION_FORMATS
    if _IMAGE_COMPRESSION_FORMATS is None:
        PIL.Image.init()
        _IMAGE_COMPRESSION_FORMATS = list(set(PIL.Image.OPEN.keys() ) & set(PIL.Image.SAVE.keys() ) )
    return _IMAGE_COMPRESSION_FORMATS
def __lowerCAmelCase ( __SCREAMING_SNAKE_CASE : "PIL.Image.Image" ):
'''simple docstring'''
__snake_case : List[Any] = BytesIO()
if image.format in list_image_compression_formats():
__snake_case : Union[str, Any] = image.format
else:
__snake_case : List[Any] = """PNG""" if image.mode in ["""1""", """L""", """LA""", """RGB""", """RGBA"""] else """TIFF"""
image.save(__SCREAMING_SNAKE_CASE , format=__SCREAMING_SNAKE_CASE )
return buffer.getvalue()
def __lowerCAmelCase ( __SCREAMING_SNAKE_CASE : "PIL.Image.Image" ):
'''simple docstring'''
if hasattr(__SCREAMING_SNAKE_CASE , """filename""" ) and image.filename != "":
return {"path": image.filename, "bytes": None}
else:
return {"path": None, "bytes": image_to_bytes(__SCREAMING_SNAKE_CASE )}
def encode_np_array ( array : np.ndarray ):
    '''simple docstring'''
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("""To support encoding images, please install 'Pillow'.""" )
    dtype = array.dtype
    dtype_byteorder = dtype.byteorder if dtype.byteorder != """=""" else _NATIVE_BYTEORDER
    dtype_kind = dtype.kind
    dtype_itemsize = dtype.itemsize
    dest_dtype = None
    # Multi-channel array case (only np.dtype("|u1") is allowed)
    if array.shape[2:]:
        dest_dtype = np.dtype("""|u1""" )
        if dtype_kind not in ["u", "i"]:
            raise TypeError(
                f'''Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays.''' )
        if dtype is not dest_dtype:
            warnings.warn(f'''Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'''' )
    # Exact match
    elif dtype in _VALID_IMAGE_ARRAY_DTPYES:
        dest_dtype = dtype
    else:  # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
        while dtype_itemsize >= 1:
            dest_dtype_str = dtype_byteorder + dtype_kind + str(dtype_itemsize )
            dest_dtype = np.dtype(dest_dtype_str )
            if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES:
                warnings.warn(f'''Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'''' )
                break
            else:
                dtype_itemsize //= 2
        if dest_dtype is None:
            raise TypeError(
                f'''Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}''' )
    image = PIL.Image.fromarray(array.astype(dest_dtype ) )
    return {"path": None, "bytes": image_to_bytes(image )}
def objs_to_list_of_image_dicts ( objs : Union[List[str], List[dict], List[np.ndarray], List["PIL.Image.Image"]] ):
    '''simple docstring'''
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("""To support encoding images, please install 'Pillow'.""" )
    if objs:
        _ , obj = first_non_null_value(objs )
        if isinstance(obj , str ):
            return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
        if isinstance(obj , np.ndarray ):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_np_array )
            return [obj_to_image_dict_func(obj ) for obj in objs]
        elif isinstance(obj , PIL.Image.Image ):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_pil_image )
            return [obj_to_image_dict_func(obj ) for obj in objs]
        else:
            return objs
    else:
        return objs
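# End-to-end sketch of the Image feature above (comment-only; the array content is
# illustrative, the function and class names are the ones defined in this file):
#
#     img_dict = encode_np_array(np.zeros((4, 4, 3), dtype=np.uint8))
#     # -> {"path": None, "bytes": b"\x89PNG..."}
#     pil_img = Image().decode_example(img_dict)  # -> 4x4 RGB PIL.Image.Image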
| 20 | 0 |
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
@require_pyctcdecode
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
    def setUp( self : Any ):
        vocab = """| <pad> <unk> <s> </s> a b c d e f g h i j k""".split()
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        self.add_kwargs_tokens_map = {
            """unk_token""": """<unk>""",
            """bos_token""": """<s>""",
            """eos_token""": """</s>""",
        }
        feature_extractor_map = {
            """feature_size""": 1,
            """padding_value""": 0.0,
            """sampling_rate""": 1_60_00,
            """return_attention_mask""": False,
            """do_normalize""": True,
        }
        self.tmpdirname = tempfile.mkdtemp()
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
        self.feature_extraction_file = os.path.join(self.tmpdirname , FEATURE_EXTRACTOR_NAME )
        with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
            fp.write(json.dumps(vocab_tokens ) + """\n""" )
        with open(self.feature_extraction_file , """w""" , encoding="""utf-8""" ) as fp:
            fp.write(json.dumps(feature_extractor_map ) + """\n""" )
        # load decoder from hub
        self.decoder_name = """hf-internal-testing/ngram-beam-search-decoder"""
    def get_tokenizer( self : Optional[Any] , **kwargs_init : Any ):
        kwargs = self.add_kwargs_tokens_map.copy()
        kwargs.update(kwargs_init )
        return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def get_feature_extractor( self : Union[str, Any] , **kwargs : Any ):
        return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname , **kwargs )
    def get_decoder( self : Dict , **kwargs : Any ):
        return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name , **kwargs )
    def tearDown( self : List[str] ):
        shutil.rmtree(self.tmpdirname )
def snake_case__ ( self : Union[str, Any] ):
        tokenizer = self.get_tokenizer()
        feature_extractor = self.get_feature_extractor()
        decoder = self.get_decoder()
        processor = WavaVecaProcessorWithLM(tokenizer=tokenizer , feature_extractor=feature_extractor , decoder=decoder )
        processor.save_pretrained(self.tmpdirname )
        processor = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname )
        # tokenizer
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
        self.assertIsInstance(processor.tokenizer , WavaVecaCTCTokenizer )
        # feature extractor
        self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
        self.assertIsInstance(processor.feature_extractor , WavaVecaFeatureExtractor )
        # decoder
        self.assertEqual(processor.decoder._alphabet.labels , decoder._alphabet.labels )
        self.assertEqual(
            processor.decoder.model_container[decoder._model_key]._unigram_set , decoder.model_container[decoder._model_key]._unigram_set , )
        self.assertIsInstance(processor.decoder , BeamSearchDecoderCTC )
def snake_case__ ( self : Tuple ):
__snake_case : Tuple = WavaVecaProcessorWithLM(
tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
processor.save_pretrained(self.tmpdirname )
# make sure that error is thrown when decoder alphabet doesn't match
__snake_case : Optional[Any] = WavaVecaProcessorWithLM.from_pretrained(
self.tmpdirname , alpha=5.0 , beta=3.0 , score_boundary=-7.0 , unk_score_offset=3 )
# decoder
self.assertEqual(processor.language_model.alpha , 5.0 )
self.assertEqual(processor.language_model.beta , 3.0 )
self.assertEqual(processor.language_model.score_boundary , -7.0 )
self.assertEqual(processor.language_model.unk_score_offset , 3 )
def snake_case__ ( self : int ):
__snake_case : Tuple = self.get_tokenizer()
# add token to trigger raise
tokenizer.add_tokens(["""xx"""] )
with self.assertRaisesRegex(_lowerCAmelCase , """include""" ):
WavaVecaProcessorWithLM(
tokenizer=_lowerCAmelCase , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
def snake_case__ ( self : Dict ):
__snake_case : int = self.get_feature_extractor()
__snake_case : str = self.get_tokenizer()
__snake_case : Dict = self.get_decoder()
__snake_case : Any = WavaVecaProcessorWithLM(tokenizer=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , decoder=_lowerCAmelCase )
__snake_case : List[Any] = floats_list((3, 10_00) )
__snake_case : Optional[Any] = feature_extractor(_lowerCAmelCase , return_tensors="""np""" )
__snake_case : Tuple = processor(_lowerCAmelCase , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def snake_case__ ( self : Optional[int] ):
__snake_case : Any = self.get_feature_extractor()
__snake_case : Union[str, Any] = self.get_tokenizer()
__snake_case : int = self.get_decoder()
__snake_case : Optional[Any] = WavaVecaProcessorWithLM(tokenizer=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , decoder=_lowerCAmelCase )
__snake_case : Optional[int] = """This is a test string"""
__snake_case : Union[str, Any] = processor(text=_lowerCAmelCase )
__snake_case : Dict = tokenizer(_lowerCAmelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
    def _get_dummy_logits( self : List[str] , shape : tuple=(2, 10, 16) , seed : int=77 ):
        np.random.seed(seed )
        return np.random.rand(*shape )
def snake_case__ ( self : Tuple ):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()
        processor = WavaVecaProcessorWithLM(tokenizer=tokenizer , feature_extractor=feature_extractor , decoder=decoder )
        logits = self._get_dummy_logits(shape=(10, 16) , seed=13 )
        decoded_processor = processor.decode(logits )
        decoded_decoder = decoder.decode_beams(logits )[0]
        self.assertEqual(decoded_decoder[0] , decoded_processor.text )
        self.assertEqual("""</s> <s> </s>""" , decoded_processor.text )
        self.assertEqual(decoded_decoder[-2] , decoded_processor.logit_score )
        self.assertEqual(decoded_decoder[-1] , decoded_processor.lm_score )
@parameterized.expand([[None], ["""fork"""], ["""spawn"""]] )
    def snake_case__ ( self : List[str] , pool_context : str ):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()
        processor = WavaVecaProcessorWithLM(tokenizer=tokenizer , feature_extractor=feature_extractor , decoder=decoder )
        logits = self._get_dummy_logits()
        # note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
        # otherwise, the LM won't be available to the pool's sub-processes.
        # manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
        if pool_context is None:
            decoded_processor = processor.batch_decode(logits )
        else:
            with get_context(pool_context ).Pool() as pool:
                decoded_processor = processor.batch_decode(logits , pool )
        logits_list = list(logits )
        with get_context("""fork""" ).Pool() as p:
            decoded_beams = decoder.decode_beams_batch(p , logits_list )
        texts_decoder , logit_scores_decoder , lm_scores_decoder = [], [], []
        for beams in decoded_beams:
            texts_decoder.append(beams[0][0] )
            logit_scores_decoder.append(beams[0][-2] )
            lm_scores_decoder.append(beams[0][-1] )
        self.assertListEqual(texts_decoder , decoded_processor.text )
        self.assertListEqual(["""<s> <s> </s>""", """<s> <s> <s>"""] , decoded_processor.text )
        self.assertListEqual(logit_scores_decoder , decoded_processor.logit_score )
        self.assertListEqual(lm_scores_decoder , decoded_processor.lm_score )
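    # Ordering rule used above, as a comment-only sketch (hypothetical script, not a test):
    #
    #     processor = WavaVecaProcessorWithLM.from_pretrained("...")  # 1. load the LM first
    #     with get_context("fork").Pool() as pool:                    # 2. then fork workers
    #         transcripts = processor.batch_decode(logits, pool)
    #
    # "fork" workers inherit the parent's memory (and thus the LM); "spawn" re-imports the
    # module instead, which is why the parameterized test covers both start methods.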
def snake_case__ ( self : Optional[int] ):
__snake_case : Optional[Any] = self.get_feature_extractor()
__snake_case : int = self.get_tokenizer()
__snake_case : str = self.get_decoder()
__snake_case : Optional[Any] = WavaVecaProcessorWithLM(tokenizer=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , decoder=_lowerCAmelCase )
__snake_case : int = self._get_dummy_logits()
__snake_case : List[str] = 15
__snake_case : Optional[Any] = -20.0
__snake_case : Tuple = -4.0
__snake_case : List[Any] = processor.batch_decode(
_lowerCAmelCase , beam_width=_lowerCAmelCase , beam_prune_logp=_lowerCAmelCase , token_min_logp=_lowerCAmelCase , )
__snake_case : List[str] = decoded_processor_out.text
__snake_case : str = list(_lowerCAmelCase )
with get_context("""fork""" ).Pool() as pool:
__snake_case : Dict = decoder.decode_beams_batch(
_lowerCAmelCase , _lowerCAmelCase , beam_width=_lowerCAmelCase , beam_prune_logp=_lowerCAmelCase , token_min_logp=_lowerCAmelCase , )
__snake_case : int = [d[0][0] for d in decoded_decoder_out]
__snake_case : List[Any] = [d[0][2] for d in decoded_decoder_out]
__snake_case : List[Any] = [d[0][3] for d in decoded_decoder_out]
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
self.assertListEqual(["""</s> <s> <s>""", """<s> <s> <s>"""] , _lowerCAmelCase )
self.assertTrue(np.array_equal(_lowerCAmelCase , decoded_processor_out.logit_score ) )
self.assertTrue(np.allclose([-20.054, -18.447] , _lowerCAmelCase , atol=1e-3 ) )
self.assertTrue(np.array_equal(_lowerCAmelCase , decoded_processor_out.lm_score ) )
self.assertTrue(np.allclose([-15.554, -13.9474] , _lowerCAmelCase , atol=1e-3 ) )
def snake_case__ ( self : Any ):
__snake_case : List[Any] = self.get_feature_extractor()
__snake_case : Any = self.get_tokenizer()
__snake_case : Union[str, Any] = self.get_decoder()
__snake_case : Dict = WavaVecaProcessorWithLM(tokenizer=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , decoder=_lowerCAmelCase )
__snake_case : Any = self._get_dummy_logits()
__snake_case : Any = 2.0
__snake_case : int = 5.0
__snake_case : Optional[int] = -20.0
__snake_case : Optional[int] = True
__snake_case : Any = processor.batch_decode(
_lowerCAmelCase , alpha=_lowerCAmelCase , beta=_lowerCAmelCase , unk_score_offset=_lowerCAmelCase , lm_score_boundary=_lowerCAmelCase , )
__snake_case : str = decoded_processor_out.text
__snake_case : int = list(_lowerCAmelCase )
decoder.reset_params(
alpha=_lowerCAmelCase , beta=_lowerCAmelCase , unk_score_offset=_lowerCAmelCase , lm_score_boundary=_lowerCAmelCase , )
with get_context("""fork""" ).Pool() as pool:
__snake_case : Tuple = decoder.decode_beams_batch(
_lowerCAmelCase , _lowerCAmelCase , )
__snake_case : int = [d[0][0] for d in decoded_decoder_out]
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
self.assertListEqual(["""<s> </s> <s> </s> </s>""", """</s> </s> <s> </s> </s>"""] , _lowerCAmelCase )
__snake_case : List[str] = processor.decoder.model_container[processor.decoder._model_key]
self.assertEqual(lm_model.alpha , 2.0 )
self.assertEqual(lm_model.beta , 5.0 )
self.assertEqual(lm_model.unk_score_offset , -20.0 )
self.assertEqual(lm_model.score_boundary , _lowerCAmelCase )
def snake_case__ ( self : Dict ):
__snake_case : Optional[Any] = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
__snake_case : Union[str, Any] = processor.decoder.model_container[processor.decoder._model_key]
__snake_case : Optional[int] = Path(language_model._kenlm_model.path.decode("""utf-8""" ) ).parent.parent.absolute()
__snake_case : Union[str, Any] = os.listdir(_lowerCAmelCase )
__snake_case : List[str] = ["""alphabet.json""", """language_model"""]
downloaded_decoder_files.sort()
expected_decoder_files.sort()
# test that only decoder relevant files from
# https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
# are downloaded and none of the rest (e.g. README.md, ...)
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
def snake_case__ ( self : Optional[Any] ):
__snake_case : Union[str, Any] = snapshot_download("""hf-internal-testing/processor_with_lm""" )
__snake_case : Dict = WavaVecaProcessorWithLM.from_pretrained(_lowerCAmelCase )
__snake_case : Optional[Any] = processor.decoder.model_container[processor.decoder._model_key]
__snake_case : Optional[int] = Path(language_model._kenlm_model.path.decode("""utf-8""" ) ).parent.parent.absolute()
__snake_case : List[str] = os.listdir(_lowerCAmelCase )
__snake_case : List[Any] = os.listdir(_lowerCAmelCase )
local_decoder_files.sort()
expected_decoder_files.sort()
# test that both decoder form hub and local files in cache are the same
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
def snake_case__ ( self : Optional[Any] ):
__snake_case : Optional[int] = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
__snake_case : str = AutoProcessor.from_pretrained("""hf-internal-testing/processor_with_lm""" )
__snake_case : Optional[int] = floats_list((3, 10_00) )
__snake_case : Union[str, Any] = processor_wavaveca(_lowerCAmelCase , return_tensors="""np""" )
__snake_case : Union[str, Any] = processor_auto(_lowerCAmelCase , return_tensors="""np""" )
for key in input_wavaveca.keys():
self.assertAlmostEqual(input_wavaveca[key].sum() , input_auto[key].sum() , delta=1e-2 )
__snake_case : Dict = self._get_dummy_logits()
__snake_case : List[Any] = processor_wavaveca.batch_decode(_lowerCAmelCase )
__snake_case : List[Any] = processor_auto.batch_decode(_lowerCAmelCase )
self.assertListEqual(decoded_wavaveca.text , decoded_auto.text )
def snake_case__ ( self : str ):
__snake_case : int = self.get_feature_extractor()
__snake_case : List[str] = self.get_tokenizer()
__snake_case : Optional[Any] = self.get_decoder()
__snake_case : Union[str, Any] = WavaVecaProcessorWithLM(tokenizer=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , decoder=_lowerCAmelCase )
self.assertListEqual(
processor.model_input_names , feature_extractor.model_input_names , msg="""`processor` and `feature_extractor` model input names do not match""" , )
    @staticmethod
    def get_from_offsets( offsets : list , key : str ):
        retrieved_list = [d[key] for d in offsets]
        return retrieved_list
def snake_case__ ( self : Dict ):
__snake_case : int = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
__snake_case : List[str] = self._get_dummy_logits()[0]
__snake_case : str = processor.decode(_lowerCAmelCase , output_word_offsets=_lowerCAmelCase )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue("""text""" in outputs )
self.assertTrue("""word_offsets""" in outputs )
self.assertTrue(isinstance(_lowerCAmelCase , _lowerCAmelCase ) )
self.assertEqual(""" """.join(self.get_from_offsets(outputs["""word_offsets"""] , """word""" ) ) , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""] , """word""" ) , ["""<s>""", """<s>""", """</s>"""] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""] , """start_offset""" ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""] , """end_offset""" ) , [1, 3, 5] )
def snake_case__ ( self : List[str] ):
__snake_case : Any = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
__snake_case : Optional[int] = self._get_dummy_logits()
__snake_case : int = processor.batch_decode(_lowerCAmelCase , output_word_offsets=_lowerCAmelCase )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue("""text""" in outputs )
self.assertTrue("""word_offsets""" in outputs )
self.assertTrue(isinstance(_lowerCAmelCase , _lowerCAmelCase ) )
self.assertListEqual(
[""" """.join(self.get_from_offsets(_lowerCAmelCase , """word""" ) ) for o in outputs["""word_offsets"""]] , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""][0] , """word""" ) , ["""<s>""", """<s>""", """</s>"""] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""][0] , """start_offset""" ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""][0] , """end_offset""" ) , [1, 3, 5] )
@slow
@require_torch
@require_torchaudio
def snake_case__ ( self : Optional[Any] ):
import torch
__snake_case : Optional[Any] = load_dataset("""common_voice""" , """en""" , split="""train""" , streaming=_lowerCAmelCase )
__snake_case : Any = ds.cast_column("""audio""" , datasets.Audio(sampling_rate=1_60_00 ) )
__snake_case : List[Any] = iter(_lowerCAmelCase )
__snake_case : Optional[int] = next(_lowerCAmelCase )
__snake_case : str = AutoProcessor.from_pretrained("""patrickvonplaten/wav2vec2-base-100h-with-lm""" )
__snake_case : str = WavaVecaForCTC.from_pretrained("""patrickvonplaten/wav2vec2-base-100h-with-lm""" )
# compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
__snake_case : List[str] = processor(sample["""audio"""]["""array"""] , return_tensors="""pt""" ).input_values
with torch.no_grad():
__snake_case : Dict = model(_lowerCAmelCase ).logits.cpu().numpy()
__snake_case : Any = processor.decode(logits[0] , output_word_offsets=_lowerCAmelCase )
__snake_case : Optional[Any] = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
__snake_case : Dict = [
{
"""start_time""": d["""start_offset"""] * time_offset,
"""end_time""": d["""end_offset"""] * time_offset,
"""word""": d["""word"""],
}
for d in output["""word_offsets"""]
]
__snake_case : Dict = """WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL"""
# output words
self.assertEqual(""" """.join(self.get_from_offsets(_lowerCAmelCase , """word""" ) ) , _lowerCAmelCase )
self.assertEqual(""" """.join(self.get_from_offsets(_lowerCAmelCase , """word""" ) ) , output.text )
# output times
__snake_case : Dict = torch.tensor(self.get_from_offsets(_lowerCAmelCase , """start_time""" ) )
__snake_case : Optional[Any] = torch.tensor(self.get_from_offsets(_lowerCAmelCase , """end_time""" ) )
# fmt: off
__snake_case : Optional[Any] = torch.tensor([1.4199, 1.6599, 2.2599, 3.0, 3.24, 3.5999, 3.7999, 4.0999, 4.26, 4.94, 5.28, 5.6599, 5.78, 5.94, 6.32, 6.5399, 6.6599] )
__snake_case : Optional[int] = torch.tensor([1.5399, 1.8999, 2.9, 3.16, 3.5399, 3.72, 4.0199, 4.1799, 4.76, 5.1599, 5.5599, 5.6999, 5.86, 6.1999, 6.38, 6.6199, 6.94] )
# fmt: on
self.assertTrue(torch.allclose(_lowerCAmelCase , _lowerCAmelCase , atol=0.01 ) )
self.assertTrue(torch.allclose(_lowerCAmelCase , _lowerCAmelCase , atol=0.01 ) )
| 354 | from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING
lowercase_ = logging.get_logger(__name__)
@add_end_docstrings(__UpperCamelCase )
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase ):
    def __init__( self : Dict , *args : Any , **kwargs : Any ):
        super().__init__(*args , **kwargs )
requires_backends(self , """vision""" )
self.check_model_type(
TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == """tf""" else MODEL_FOR_VISION_2_SEQ_MAPPING )
    def _sanitize_parameters( self : List[Any] , max_new_tokens : Any=None , generate_kwargs : Any=None , prompt : Any=None ):
        forward_kwargs = {}
        preprocess_params = {}
        if prompt is not None:
            preprocess_params["""prompt"""] = prompt
        if generate_kwargs is not None:
            forward_kwargs["""generate_kwargs"""] = generate_kwargs
        if max_new_tokens is not None:
            if "generate_kwargs" not in forward_kwargs:
                forward_kwargs["""generate_kwargs"""] = {}
            if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
                raise ValueError(
                    """'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter,"""
                    """ please use only one""" )
            forward_kwargs["""generate_kwargs"""]["""max_new_tokens"""] = max_new_tokens
        return preprocess_params, forward_kwargs, {}
    def __call__( self : Optional[Any] , images : Union[str, List[str], "Image.Image", List["Image.Image"]] , **kwargs : Any ):
        return super().__call__(images , **kwargs )
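    # Usage sketch (comment-only; "microsoft/git-base" is just an example checkpoint):
    #
    #     from transformers import pipeline
    #     pipe = pipeline("image-to-text", model="microsoft/git-base")
    #     pipe("cat.png", prompt="a photo of", max_new_tokens=20)
    #     # -> [{"generated_text": "a photo of ..."}]
    #
    # `prompt` is routed to preprocess() below and `max_new_tokens` is folded into
    # generate_kwargs by _sanitize_parameters() above.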
    def preprocess( self : Optional[Any] , image : Union[str, "Image.Image"] , prompt : Optional[str]=None ):
        image = load_image(image )
        if prompt is not None:
            if not isinstance(prompt , str ):
                raise ValueError(
                    f'''Received an invalid text input, got - {type(prompt )} - but expected a single string. '''
                    """Note also that one single text can be provided for conditional image to text generation.""" )
            model_type = self.model.config.model_type
            if model_type == "git":
                model_inputs = self.image_processor(images=image , return_tensors=self.framework )
                input_ids = self.tokenizer(text=prompt , add_special_tokens=False ).input_ids
                input_ids = [self.tokenizer.cls_token_id] + input_ids
                input_ids = torch.tensor(input_ids ).unsqueeze(0 )
                model_inputs.update({"""input_ids""": input_ids} )
            elif model_type == "pix2struct":
                model_inputs = self.image_processor(images=image , header_text=prompt , return_tensors=self.framework )
            elif model_type != "vision-encoder-decoder":
                # vision-encoder-decoder does not support conditional generation
                model_inputs = self.image_processor(images=image , return_tensors=self.framework )
                text_inputs = self.tokenizer(prompt , return_tensors=self.framework )
                model_inputs.update(text_inputs )
            else:
                raise ValueError(f'''Model type {model_type} does not support conditional text generation''' )
        else:
            model_inputs = self.image_processor(images=image , return_tensors=self.framework )
        if self.model.config.model_type == "git" and prompt is None:
            model_inputs["""input_ids"""] = None
        return model_inputs
    def _forward( self : Union[str, Any] , model_inputs : dict , generate_kwargs : Optional[dict]=None ):
        # Git model sets `model_inputs["input_ids"] = None` in `preprocess` (when `prompt=None`). In batch model, the
        # pipeline will group them into a list of `None`, which fail `_forward`. Avoid this by checking it first.
        if (
            "input_ids" in model_inputs
            and isinstance(model_inputs["""input_ids"""] , list )
            and all(x is None for x in model_inputs["""input_ids"""] )
        ):
            model_inputs["""input_ids"""] = None
        if generate_kwargs is None:
            generate_kwargs = {}
        # FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
        # parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
        # the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
        # in the `_prepare_model_inputs` method.
        inputs = model_inputs.pop(self.model.main_input_name )
        model_outputs = self.model.generate(inputs , **model_inputs , **generate_kwargs )
        return model_outputs
    def postprocess( self : List[Any] , model_outputs : Any ):
        records = []
        for output_ids in model_outputs:
            record = {
                """generated_text""": self.tokenizer.decode(
                    output_ids , skip_special_tokens=True , )
            }
            records.append(record )
        return records
| 20 | 0 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import (
AutoProcessor,
BertTokenizerFast,
BlipImageProcessor,
GPTaTokenizer,
InstructBlipProcessor,
PreTrainedTokenizerFast,
)
@require_vision
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
    def setUp( self : Tuple ):
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = BlipImageProcessor()
        tokenizer = GPTaTokenizer.from_pretrained("""hf-internal-testing/tiny-random-GPT2Model""" )
        qformer_tokenizer = BertTokenizerFast.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
        processor = InstructBlipProcessor(image_processor , tokenizer , qformer_tokenizer )
        processor.save_pretrained(self.tmpdirname )
    def get_tokenizer( self : List[Any] , **kwargs : Any ):
        return AutoProcessor.from_pretrained(self.tmpdirname , **kwargs ).tokenizer
    def get_image_processor( self : Dict , **kwargs : Any ):
        return AutoProcessor.from_pretrained(self.tmpdirname , **kwargs ).image_processor
    def get_qformer_tokenizer( self : List[Any] , **kwargs : Any ):
        return AutoProcessor.from_pretrained(self.tmpdirname , **kwargs ).qformer_tokenizer
    def tearDown( self : Any ):
        shutil.rmtree(self.tmpdirname )
    def prepare_image_inputs( self : Union[str, Any] ):
        image_inputs = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uint8 )]
        image_inputs = [Image.fromarray(np.moveaxis(x , 0 , -1 ) ) for x in image_inputs]
        return image_inputs
def snake_case__ ( self : List[Any] ):
        processor = InstructBlipProcessor(
            tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() , qformer_tokenizer=self.get_qformer_tokenizer() , )
        processor.save_pretrained(self.tmpdirname )
        tokenizer_add_kwargs = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False , padding_value=1.0 )
        processor = InstructBlipProcessor.from_pretrained(
            self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=False , padding_value=1.0 )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
        self.assertIsInstance(processor.tokenizer , GPTaTokenizer )
        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor , BlipImageProcessor )
        self.assertIsInstance(processor.qformer_tokenizer , PreTrainedTokenizerFast )
def snake_case__ ( self : str ):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()
        processor = InstructBlipProcessor(
            tokenizer=tokenizer , image_processor=image_processor , qformer_tokenizer=qformer_tokenizer )
        image_input = self.prepare_image_inputs()
        input_feat_extract = image_processor(image_input , return_tensors="""np""" )
        input_processor = processor(images=image_input , return_tensors="""np""" )
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def snake_case__ ( self : int ):
__snake_case : List[Any] = self.get_image_processor()
__snake_case : Tuple = self.get_tokenizer()
__snake_case : Union[str, Any] = self.get_qformer_tokenizer()
__snake_case : Optional[int] = InstructBlipProcessor(
tokenizer=_lowerCAmelCase , image_processor=_lowerCAmelCase , qformer_tokenizer=_lowerCAmelCase )
__snake_case : Any = """lower newer"""
__snake_case : Union[str, Any] = processor(text=_lowerCAmelCase )
__snake_case : List[str] = tokenizer(_lowerCAmelCase , return_token_type_ids=_lowerCAmelCase )
__snake_case : List[Any] = qformer_tokenizer(_lowerCAmelCase , return_token_type_ids=_lowerCAmelCase )
for key in encoded_tokens.keys():
self.assertListEqual(encoded_tokens[key] , encoded_processor[key] )
for key in encoded_tokens_qformer.keys():
self.assertListEqual(encoded_tokens_qformer[key] , encoded_processor["""qformer_""" + key] )
def snake_case__ ( self : Union[str, Any] ):
__snake_case : Dict = self.get_image_processor()
__snake_case : Tuple = self.get_tokenizer()
__snake_case : Optional[int] = self.get_qformer_tokenizer()
__snake_case : Optional[int] = InstructBlipProcessor(
tokenizer=_lowerCAmelCase , image_processor=_lowerCAmelCase , qformer_tokenizer=_lowerCAmelCase )
__snake_case : Union[str, Any] = """lower newer"""
__snake_case : List[Any] = self.prepare_image_inputs()
__snake_case : Optional[int] = processor(text=_lowerCAmelCase , images=_lowerCAmelCase )
self.assertListEqual(
list(inputs.keys() ) , ["""input_ids""", """attention_mask""", """qformer_input_ids""", """qformer_attention_mask""", """pixel_values"""] , )
# test if it raises when no input is passed
with pytest.raises(_lowerCAmelCase ):
processor()
def snake_case__ ( self : str ):
__snake_case : Optional[int] = self.get_image_processor()
__snake_case : Any = self.get_tokenizer()
__snake_case : List[str] = self.get_qformer_tokenizer()
__snake_case : int = InstructBlipProcessor(
tokenizer=_lowerCAmelCase , image_processor=_lowerCAmelCase , qformer_tokenizer=_lowerCAmelCase )
__snake_case : Union[str, Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
__snake_case : Union[str, Any] = processor.batch_decode(_lowerCAmelCase )
__snake_case : List[Any] = tokenizer.batch_decode(_lowerCAmelCase )
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
def snake_case__ ( self : Union[str, Any] ):
__snake_case : str = self.get_image_processor()
__snake_case : List[Any] = self.get_tokenizer()
__snake_case : List[str] = self.get_qformer_tokenizer()
__snake_case : str = InstructBlipProcessor(
tokenizer=_lowerCAmelCase , image_processor=_lowerCAmelCase , qformer_tokenizer=_lowerCAmelCase )
__snake_case : Union[str, Any] = """lower newer"""
__snake_case : List[str] = self.prepare_image_inputs()
__snake_case : List[Any] = processor(text=_lowerCAmelCase , images=_lowerCAmelCase )
self.assertListEqual(
list(inputs.keys() ) , ["""input_ids""", """attention_mask""", """qformer_input_ids""", """qformer_attention_mask""", """pixel_values"""] , )
| 355 | from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
lowercase_ = {"configuration_yolos": ["YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP", "YolosConfig", "YolosOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ["YolosFeatureExtractor"]
lowercase_ = ["YolosImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_yolos"] = [
        "YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST",
        "YolosForObjectDetection",
        "YolosModel",
        "YolosPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_yolos import YolosFeatureExtractor
from .image_processing_yolos import YolosImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_yolos import (
YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST,
YolosForObjectDetection,
YolosModel,
YolosPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], lowercase_, module_spec=__spec__)
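# Note: `lowercase_` above plays the role of the usual `_import_structure` dict, so the heavy
# torch/vision imports are deferred. A usage sketch (hypothetical caller):
#     from transformers.models.yolos import YolosModel  # modeling_yolos is imported on first access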
| 20 | 0 |
import json
import os
from dataclasses import dataclass
from functools import partial
from typing import Callable
import flax.linen as nn
import jax
import jax.numpy as jnp
import joblib
import optax
import wandb
from flax import jax_utils, struct, traverse_util
from flax.serialization import from_bytes, to_bytes
from flax.training import train_state
from flax.training.common_utils import shard
from tqdm.auto import tqdm
from transformers import BigBirdConfig, FlaxBigBirdForQuestionAnswering
from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdForQuestionAnsweringModule
class FlaxBigBirdForNaturalQuestionsModule(FlaxBigBirdForQuestionAnsweringModule):
    """BigBird QA module with an extra 5-way classification head for the answer category."""

    config: BigBirdConfig
    dtype: jnp.dtype = jnp.float32
    add_pooling_layer: bool = True

    def setup(self):
        super().setup()
        self.cls = nn.Dense(5, dtype=self.dtype)

    def __call__(self, *args, **kwargs):
        outputs = super().__call__(*args, **kwargs)
        cls_out = self.cls(outputs[2])
        return outputs[:2] + (cls_out,)
class FlaxBigBirdForNaturalQuestions(FlaxBigBirdForQuestionAnswering):
    module_class = FlaxBigBirdForNaturalQuestionsModule
def calculate_loss_for_nq(start_logits, start_labels, end_logits, end_labels, pooled_logits, pooled_labels):
    """Average the cross-entropy losses of the start, end, and pooled (category) heads."""

    def cross_entropy(logits, labels, reduction=None):
        vocab_size = logits.shape[-1]
        labels = (labels[..., None] == jnp.arange(vocab_size)[None]).astype("f4")
        logits = jax.nn.log_softmax(logits, axis=-1)
        loss = -jnp.sum(labels * logits, axis=-1)
        if reduction is not None:
            loss = reduction(loss)
        return loss

    cross_entropy = partial(cross_entropy, reduction=jnp.mean)
    start_loss = cross_entropy(start_logits, start_labels)
    end_loss = cross_entropy(end_logits, end_labels)
    pooled_loss = cross_entropy(pooled_logits, pooled_labels)
    return (start_loss + end_loss + pooled_loss) / 3
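# Illustration (the shapes are assumptions, not from the script): start/end logits are
# (batch, seq_len) and pooled logits are (batch, 5); each head one-hot encodes its labels,
# applies log-softmax, and jnp.mean reduces, so the return value is the mean of the three
# per-head cross-entropies.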
@dataclass
class Args:
    model_id: str = "google/bigbird-roberta-base"
    logging_steps: int = 3000
    save_steps: int = 10500
    block_size: int = 128
    num_workers: int = 3  # field name assumed from the upstream BigBird script; the value 3 is original
    batch_size_per_device: int = 1
    max_epochs: int = 5
    # tx_args
    lr: float = 3e-5
    init_lr: float = 0.0
    warmup_steps: int = 20000
    weight_decay: float = 0.0095
    save_dir: str = "bigbird-roberta-natural-questions"
    base_dir: str = "training-expt"
    tr_data_path: str = "data/nq-training.jsonl"
    val_data_path: str = "data/nq-validation.jsonl"

    def __post_init__(self):
        os.makedirs(self.base_dir, exist_ok=True)
        self.save_dir = os.path.join(self.base_dir, self.save_dir)
        self.batch_size = self.batch_size_per_device * jax.device_count()
@dataclass
class DataCollator:
    pad_id: int
    max_length: int = 4096  # no dynamic padding on TPUs

    def __call__(self, batch):
        batch = self.collate_fn(batch)
        batch = jax.tree_util.tree_map(shard, batch)
        return batch

    def collate_fn(self, features):
        input_ids, attention_mask = self.fetch_inputs(features["input_ids"])
        batch = {
            "input_ids": jnp.array(input_ids, dtype=jnp.int32),
            "attention_mask": jnp.array(attention_mask, dtype=jnp.int32),
            "start_labels": jnp.array(features["start_token"], dtype=jnp.int32),
            "end_labels": jnp.array(features["end_token"], dtype=jnp.int32),
            "pooled_labels": jnp.array(features["category"], dtype=jnp.int32),
        }
        return batch

    def fetch_inputs(self, input_ids: list):
        inputs = [self._fetch_inputs(ids) for ids in input_ids]
        return zip(*inputs)

    def _fetch_inputs(self, input_ids: list):
        attention_mask = [1 for _ in range(len(input_ids))]
        while len(input_ids) < self.max_length:
            input_ids.append(self.pad_id)
            attention_mask.append(0)
        return input_ids, attention_mask
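# Usage sketch (hypothetical pad id): DataCollator(pad_id=0) pads every example up to
# max_length, converts the features to int32 jnp arrays, and `shard`s the batch so each
# local device receives an equal slice.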
def get_batched_dataset(dataset, batch_size, seed=None):
    """Yield successive `batch_size`-sized dict batches from `dataset`, optionally shuffled."""
    if seed is not None:
        dataset = dataset.shuffle(seed=seed)
    for i in range(len(dataset) // batch_size):
        batch = dataset[i * batch_size : (i + 1) * batch_size]
        yield dict(batch)
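# Example (assuming a `datasets.Dataset`-like object): get_batched_dataset(ds, 32, seed=0)
# yields len(ds) // 32 dict batches; a trailing partial batch is dropped by the integer division.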
@partial(jax.pmap, axis_name="batch")
def train_step(state, drp_rng, **model_inputs):
    def loss_fn(params):
        start_labels = model_inputs.pop("start_labels")
        end_labels = model_inputs.pop("end_labels")
        pooled_labels = model_inputs.pop("pooled_labels")
        outputs = state.apply_fn(**model_inputs, params=params, dropout_rng=drp_rng, train=True)
        start_logits, end_logits, pooled_logits = outputs
        return state.loss_fn(
            start_logits, start_labels, end_logits, end_labels, pooled_logits, pooled_labels,
        )

    drp_rng, new_drp_rng = jax.random.split(drp_rng)
    grad_fn = jax.value_and_grad(loss_fn)
    loss, grads = grad_fn(state.params)
    metrics = jax.lax.pmean({"loss": loss}, axis_name="batch")
    grads = jax.lax.pmean(grads, "batch")
    state = state.apply_gradients(grads=grads)
    return state, metrics, new_drp_rng
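# Because of jax.pmap(axis_name="batch"), `state`, `drp_rng`, and the model inputs must carry a
# leading device axis (see jax.random.split(rng, jax.device_count()) in the Trainer below);
# jax.lax.pmean then averages the loss and gradients across devices before the optimizer update.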
@partial(jax.pmap, axis_name="batch")
def val_step(state, **model_inputs):
    start_labels = model_inputs.pop("start_labels")
    end_labels = model_inputs.pop("end_labels")
    pooled_labels = model_inputs.pop("pooled_labels")
    outputs = state.apply_fn(**model_inputs, params=state.params, train=False)
    start_logits, end_logits, pooled_logits = outputs
    loss = state.loss_fn(start_logits, start_labels, end_logits, end_labels, pooled_logits, pooled_labels)
    metrics = jax.lax.pmean({"loss": loss}, axis_name="batch")
    return metrics
class TrainState(train_state.TrainState):
    loss_fn: Callable = struct.field(pytree_node=False)
@dataclass
class Trainer:
    args: Args
    data_collator: Callable
    train_step_fn: Callable
    val_step_fn: Callable
    model_save_fn: Callable
    logger: wandb
    scheduler_fn: Callable = None
    def create_state(self, model, tx, num_train_steps, ckpt_dir=None):
        params = model.params
        state = TrainState.create(
            apply_fn=model.__call__, params=params, tx=tx, loss_fn=calculate_loss_for_nq,
        )
        if ckpt_dir is not None:
            params, opt_state, step, args, data_collator = restore_checkpoint(ckpt_dir, state)
            tx_args = {
                "lr": args.lr,
                "init_lr": args.init_lr,
                "warmup_steps": args.warmup_steps,
                "num_train_steps": num_train_steps,
                "weight_decay": args.weight_decay,
            }
            tx, lr = build_tx(**tx_args)
            state = train_state.TrainState(
                step=step, apply_fn=model.__call__, params=params, tx=tx, opt_state=opt_state,
            )
            self.args = args
            self.data_collator = data_collator
            self.scheduler_fn = lr
            model.params = params
        state = jax_utils.replicate(state)
        return state
    def train(self, state, tr_dataset, val_dataset):
        args = self.args
        total = len(tr_dataset) // args.batch_size
        rng = jax.random.PRNGKey(0)
        drp_rng = jax.random.split(rng, jax.device_count())
        for epoch in range(args.max_epochs):
            running_loss = jnp.array(0, dtype=jnp.float32)
            tr_dataloader = get_batched_dataset(tr_dataset, args.batch_size, seed=epoch)
            i = 0
            for batch in tqdm(tr_dataloader, total=total, desc=f'''Running EPOCH-{epoch}''' ):
                batch = self.data_collator(batch)
                state, metrics, drp_rng = self.train_step_fn(state, drp_rng, **batch)
                running_loss += jax_utils.unreplicate(metrics["loss"])
                i += 1
                if i % args.logging_steps == 0:
                    state_step = jax_utils.unreplicate(state.step)
                    tr_loss = running_loss.item() / i
                    lr = self.scheduler_fn(state_step - 1)
                    eval_loss = self.evaluate(state, val_dataset)
                    logging_dict = {
                        "step": state_step.item(),
                        "eval_loss": eval_loss.item(),
                        "tr_loss": tr_loss,
                        "lr": lr.item(),
                    }
                    tqdm.write(str(logging_dict))
                    self.logger.log(logging_dict, commit=True)
                if i % args.save_steps == 0:
                    self.save_checkpoint(args.save_dir + f'''-e{epoch}-s{i}''', state=state)
    def evaluate(self, state, dataset):
        dataloader = get_batched_dataset(dataset, self.args.batch_size)
        total = len(dataset) // self.args.batch_size
        running_loss = jnp.array(0, dtype=jnp.float32)
        i = 0
        for batch in tqdm(dataloader, total=total, desc="Evaluating ... "):
            batch = self.data_collator(batch)
            metrics = self.val_step_fn(state, **batch)
            running_loss += jax_utils.unreplicate(metrics["loss"])
            i += 1
        return running_loss / i
    def save_checkpoint(self, save_dir, state):
        state = jax_utils.unreplicate(state)
        print(f'''SAVING CHECKPOINT IN {save_dir}''', end=" ... ")
        self.model_save_fn(save_dir, params=state.params)
        with open(os.path.join(save_dir, "opt_state.msgpack"), "wb") as f:
            f.write(to_bytes(state.opt_state))
        joblib.dump(self.args, os.path.join(save_dir, "args.joblib"))
        joblib.dump(self.data_collator, os.path.join(save_dir, "data_collator.joblib"))
        with open(os.path.join(save_dir, "training_state.json"), "w") as f:
            json.dump({"step": state.step.item()}, f)
        print("DONE")
def restore_checkpoint(save_dir, state):
    """Load params, optimizer state, step, args, and data collator back from `save_dir`."""
    print(f'''RESTORING CHECKPOINT FROM {save_dir}''', end=" ... ")
    with open(os.path.join(save_dir, "flax_model.msgpack"), "rb") as f:
        params = from_bytes(state.params, f.read())
    with open(os.path.join(save_dir, "opt_state.msgpack"), "rb") as f:
        opt_state = from_bytes(state.opt_state, f.read())
    args = joblib.load(os.path.join(save_dir, "args.joblib"))
    data_collator = joblib.load(os.path.join(save_dir, "data_collator.joblib"))
    with open(os.path.join(save_dir, "training_state.json"), "r") as f:
        training_state = json.load(f)
    step = training_state["step"]
    print("DONE")
    return params, opt_state, step, args, data_collator
def scheduler_fn(lr, init_lr, warmup_steps, num_train_steps):
    """Linear warmup from `init_lr` to `lr`, then linear decay down to ~1e-7."""
    decay_steps = num_train_steps - warmup_steps
    warmup_fn = optax.linear_schedule(init_value=init_lr, end_value=lr, transition_steps=warmup_steps)
    decay_fn = optax.linear_schedule(init_value=lr, end_value=1e-7, transition_steps=decay_steps)
    lr = optax.join_schedules(schedules=[warmup_fn, decay_fn], boundaries=[warmup_steps])
    return lr
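# Shape of the resulting schedule (values assume the Args defaults above):
#   step 0              -> init_lr (0.0)
#   step warmup_steps   -> lr (3e-5), the warmup peak
#   step num_train_steps -> ~1e-7 at the end of the linear decay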
def build_tx(lr, init_lr, warmup_steps, num_train_steps, weight_decay):
    """Build the AdamW optimizer (with a weight-decay mask) and its learning-rate schedule."""

    def weight_decay_mask(params):
        params = traverse_util.flatten_dict(params)
        # decay everything except biases and LayerNorm scales (keys are tuples of path segments)
        mask = {k: (k[-1] != "bias" and k[-2:] != ("LayerNorm", "scale")) for k in params}
        return traverse_util.unflatten_dict(mask)

    lr = scheduler_fn(lr, init_lr, warmup_steps, num_train_steps)
    tx = optax.adamw(learning_rate=lr, weight_decay=weight_decay, mask=weight_decay_mask)
    return tx, lr
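# Usage sketch (hypothetical step budget): tx, lr = build_tx(3e-5, 0.0, 20_000, 100_000, 0.0095)
# returns an AdamW transform whose decay skips biases and LayerNorm scales, plus the schedule
# that the Trainer later logs through self.scheduler_fn.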
| 356 | # Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.speechta import SpeechTaForTextToSpeech, SpeechTaHifiGan, SpeechTaProcessor
from ..utils import is_datasets_available
from .base import PipelineTool
if is_datasets_available():
from datasets import load_dataset
class TextToSpeechTool(PipelineTool):
    default_checkpoint = "microsoft/speecht5_tts"
    description = (
        "This is a tool that reads an English text out loud. It takes an input named `text` which should contain the "
        "text to read (in English) and returns a waveform object containing the sound."
    )
    name = "text_reader"
    pre_processor_class = SpeechTaProcessor
    model_class = SpeechTaForTextToSpeech
    post_processor_class = SpeechTaHifiGan
    inputs = ["text"]
    outputs = ["audio"]
    def setup(self):
        if self.post_processor is None:
            self.post_processor = "microsoft/speecht5_hifigan"
        super().setup()
    def encode(self, text, speaker_embeddings=None):
        inputs = self.pre_processor(text=text, return_tensors="pt", truncation=True)
        if speaker_embeddings is None:
            if not is_datasets_available():
                raise ImportError("Datasets needs to be installed if not passing speaker embeddings.")
            embeddings_dataset = load_dataset("Matthijs/cmu-arctic-xvectors", split="validation")
            speaker_embeddings = torch.tensor(embeddings_dataset[7305]["xvector"]).unsqueeze(0)
        return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings}
    def forward(self, inputs):
        with torch.no_grad():
            return self.model.generate_speech(**inputs)

    def decode(self, outputs):
        with torch.no_grad():
            return self.post_processor(outputs).cpu().detach()
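# Usage sketch (assumes the transformers agents runtime is installed):
#     tool = TextToSpeechTool()
#     speech = tool("Hello world")  # 1-D waveform tensor; the SpeechT5 vocoder outputs 16 kHz audio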
| 20 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"alibaba-damo/mgp-str-base": "https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json",
}
class MgpstrConfig(PretrainedConfig):
    """Configuration for the MGP-STR scene-text-recognition model."""
    model_type = "mgp-str"

    def __init__(self, image_size=[32, 128], patch_size=4, num_channels=3, max_token_length=27, num_character_labels=38, num_bpe_labels=50257, num_wordpiece_labels=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, mlp_ratio=4.0, qkv_bias=True, distilled=False, layer_norm_eps=1e-5, drop_rate=0.0, attn_drop_rate=0.0, drop_path_rate=0.0, output_a3_attentions=False, initializer_range=0.02, **kwargs):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.max_token_length = max_token_length
        self.num_character_labels = num_character_labels
        self.num_bpe_labels = num_bpe_labels
        self.num_wordpiece_labels = num_wordpiece_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.mlp_ratio = mlp_ratio
        self.distilled = distilled
        self.layer_norm_eps = layer_norm_eps
        self.drop_rate = drop_rate
        self.qkv_bias = qkv_bias
        self.attn_drop_rate = attn_drop_rate
        self.drop_path_rate = drop_path_rate
        self.output_a3_attentions = output_a3_attentions
        self.initializer_range = initializer_range
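# Minimal usage sketch (hypothetical overrides; the defaults mirror alibaba-damo/mgp-str-base):
#     config = MgpstrConfig()
#     tiny = MgpstrConfig(hidden_size=192, num_hidden_layers=6, num_attention_heads=3)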
| 357 | import math
def real_power(apparent_power: float, power_factor: float) -> float:
    """
    Calculate real power from apparent power and power factor.

    >>> real_power(100, 0.9)
    90.0
    >>> real_power(0, 0.8)
    0.0
    """
    if (
        not isinstance(power_factor, (int, float))
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError("power_factor must be a valid float value between -1 and 1.")
    return apparent_power * power_factor


def reactive_power(apparent_power: float, power_factor: float) -> float:
    """
    Calculate reactive power from apparent power and power factor.

    >>> round(reactive_power(100, 0.9), 2)
    43.59
    """
    if (
        not isinstance(power_factor, (int, float))
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError("power_factor must be a valid float value between -1 and 1.")
    return apparent_power * math.sqrt(1 - power_factor**2)
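# Worked example: a 100 VA load at power factor 0.9 splits into 90 W real power and
# sqrt(100^2 - 90^2) ≈ 43.59 VAR reactive power, matching the doctests above.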
if __name__ == "__main__":
import doctest
doctest.testmod()
| 20 | 0 |
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
lowercase_ = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
EN_CODE = 25_00_04
RO_CODE = 25_00_20
@require_sentencepiece
@require_tokenizers
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase , unittest.TestCase ):
    tokenizer_class = MBartTokenizer
    rust_tokenizer_class = MBartTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = MBartTokenizer(lowercase_, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_full_tokenizer(self):
        tokenizer = MBartTokenizer(lowercase_, keep_accents=True)
        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
    def test_save_pretrained(self):
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return
        self.tokenizers_list[0] = (self.rust_tokenizer_class, "hf-internal-testing/tiny-random-mbart", {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)
                # Checks it saves with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f)
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)
                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
                shutil.rmtree(tmpdirname2)
                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=True)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)
                # Checks it saves with the same files
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)
                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                shutil.rmtree(tmpdirname2)
                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=False)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)
                # Checks it saved the tokenizer.json file
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))
                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                shutil.rmtree(tmpdirname2)
@require_torch
@require_sentencepiece
@require_tokenizers
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
    checkpoint_name = "facebook/mbart-large-en-ro"
    src_text = [
" UN Chief Says There Is No Military Solution in Syria",
" Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.",
]
    tgt_text = [
"Şeful ONU declară că nu există o soluţie militară în Siria",
"Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"
" pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor"
" face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
]
    expected_src_tokens = [8274, 127873, 25916, 7, 8622, 2071, 438, 67485, 53, 187895, 23, 51712, 2, EN_CODE]
    @classmethod
    def setUpClass(cls):
        cls.tokenizer: MBartTokenizer = MBartTokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="en_XX", tgt_lang="ro_RO"
        )
        cls.pad_token_id = 1
        return cls
    def check_language_codes(self):
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ar_AR"""] , 25_00_01 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""en_EN"""] , 25_00_04 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ro_RO"""] , 25_00_20 )
    def test_enro_tokenizer_batch_encode_plus(self):
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)
    def test_enro_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(RO_CODE, self.tokenizer.all_special_ids)
        generated_ids = [RO_CODE, 884, 9019, 96, 9, 916, 86792, 36, 18743, 15596, 5, 2]
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_romanian = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_romanian)
        self.assertNotIn(self.tokenizer.eos_token, result)
    def test_enro_tokenizer_truncation(self):
        src_text = ["this is gunna be a long sentence " * 20]
        assert isinstance(src_text[0], str)
        desired_max_length = 10
        ids = self.tokenizer(src_text, max_length=desired_max_length, truncation=True).input_ids[0]
        self.assertEqual(ids[-2], 2)
        self.assertEqual(ids[-1], EN_CODE)
        self.assertEqual(len(ids), desired_max_length)
    def test_mask_token(self):
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["""<mask>""", """ar_AR"""] ) , [25_00_26, 25_00_01] )
    def test_special_tokens_unaffacted_by_save_load(self):
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = MBartTokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids, original_special_tokens)
@require_torch
    def test_batch_fairseq_parity(self):
        batch = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=True, return_tensors="pt")
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE]
assert batch.decoder_input_ids[1][0].tolist() == RO_CODE
assert batch.decoder_input_ids[1][-1] == 2
assert batch.labels[1][-2:].tolist() == [2, RO_CODE]
@require_torch
    def test_enro_tokenizer_prepare_batch(self):
        batch = self.tokenizer(
            self.src_text,
            text_target=self.tgt_text,
            padding=True,
            truncation=True,
            max_length=len(self.expected_src_tokens),
            return_tensors="pt",
        )
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)
        self.assertIsInstance(batch, BatchEncoding)
        self.assertEqual((2, 14), batch.input_ids.shape)
        self.assertEqual((2, 14), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens, result)
self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, EN_CODE] )
    def test_seq2seq_max_length(self):
        batch = self.tokenizer(self.src_text, padding=True, truncation=True, max_length=3, return_tensors="pt")
        targets = self.tokenizer(
            text_target=self.tgt_text, padding=True, truncation=True, max_length=10, return_tensors="pt"
        )
        labels = targets["input_ids"]
        batch["decoder_input_ids"] = shift_tokens_right(labels, self.tokenizer.pad_token_id)
        self.assertEqual(batch.input_ids.shape[1], 3)
        self.assertEqual(batch.decoder_input_ids.shape[1], 10)
@require_torch
    def test_tokenizer_translation(self):
        inputs = self.tokenizer._build_translation_inputs(
            "A test", return_tensors="pt", src_lang="en_XX", tgt_lang="ar_AR"
        )
        self.assertEqual(
            nested_simplify(inputs),
            {
# A, test, EOS, en_XX
"""input_ids""": [[62, 30_34, 2, 25_00_04]],
"""attention_mask""": [[1, 1, 1, 1]],
# ar_AR
"""forced_bos_token_id""": 25_00_01,
} , )
| 358 | from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
FPaRecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
get_ccl_version,
    is_abit_bnb_available,
is_aim_available,
is_bfaa_available,
is_bnb_available,
is_botoa_available,
is_ccl_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_fpa_available,
is_ipex_available,
is_megatron_lm_available,
is_mlflow_available,
is_mps_available,
is_npu_available,
is_rich_available,
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
convert_outputs_to_fpaa,
convert_to_fpaa,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
if is_deepspeed_available():
from .deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
HfDeepSpeedConfig,
)
from .bnb import has_abit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
prepare_sagemager_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
TaTrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers
| 20 | 0 |
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import get_test_info # noqa: E402
from get_test_info import ( # noqa: E402
get_model_to_test_mapping,
get_model_to_tester_mapping,
get_test_to_tester_mapping,
)
bert_test_file = os.path.join("tests", "models", "bert", "test_modeling_bert.py")
blip_test_file = os.path.join("tests", "models", "blip", "test_modeling_blip.py")
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
    def test_get_test_to_tester_mapping(self):
        bert_test_tester_mapping = get_test_to_tester_mapping(bert_test_file)
        blip_test_tester_mapping = get_test_to_tester_mapping(blip_test_file)
        expected_bert_mapping = {"BertModelTest": "BertModelTester"}
        expected_blip_mapping = {
"""BlipModelTest""": """BlipModelTester""",
"""BlipTextImageModelTest""": """BlipTextImageModelsModelTester""",
"""BlipTextModelTest""": """BlipTextModelTester""",
"""BlipTextRetrievalModelTest""": """BlipTextRetrievalModelTester""",
"""BlipVQAModelTest""": """BlipVQAModelTester""",
"""BlipVisionModelTest""": """BlipVisionModelTester""",
}
        self.assertEqual(get_test_info.to_json(bert_test_tester_mapping), expected_bert_mapping)
        self.assertEqual(get_test_info.to_json(blip_test_tester_mapping), expected_blip_mapping)
    def test_get_model_to_test_mapping(self):
        bert_model_test_mapping = get_model_to_test_mapping(bert_test_file)
        blip_model_test_mapping = get_model_to_test_mapping(blip_test_file)
        expected_bert_mapping = {
"""BertForMaskedLM""": ["""BertModelTest"""],
"""BertForMultipleChoice""": ["""BertModelTest"""],
"""BertForNextSentencePrediction""": ["""BertModelTest"""],
"""BertForPreTraining""": ["""BertModelTest"""],
"""BertForQuestionAnswering""": ["""BertModelTest"""],
"""BertForSequenceClassification""": ["""BertModelTest"""],
"""BertForTokenClassification""": ["""BertModelTest"""],
"""BertLMHeadModel""": ["""BertModelTest"""],
"""BertModel""": ["""BertModelTest"""],
}
        expected_blip_mapping = {
"""BlipForConditionalGeneration""": ["""BlipTextImageModelTest"""],
"""BlipForImageTextRetrieval""": ["""BlipTextRetrievalModelTest"""],
"""BlipForQuestionAnswering""": ["""BlipVQAModelTest"""],
"""BlipModel""": ["""BlipModelTest"""],
"""BlipTextModel""": ["""BlipTextModelTest"""],
"""BlipVisionModel""": ["""BlipVisionModelTest"""],
}
        self.assertEqual(get_test_info.to_json(bert_model_test_mapping), expected_bert_mapping)
        self.assertEqual(get_test_info.to_json(blip_model_test_mapping), expected_blip_mapping)
    def test_get_model_to_tester_mapping(self):
        bert_model_tester_mapping = get_model_to_tester_mapping(bert_test_file)
        blip_model_tester_mapping = get_model_to_tester_mapping(blip_test_file)
        expected_bert_mapping = {
"""BertForMaskedLM""": ["""BertModelTester"""],
"""BertForMultipleChoice""": ["""BertModelTester"""],
"""BertForNextSentencePrediction""": ["""BertModelTester"""],
"""BertForPreTraining""": ["""BertModelTester"""],
"""BertForQuestionAnswering""": ["""BertModelTester"""],
"""BertForSequenceClassification""": ["""BertModelTester"""],
"""BertForTokenClassification""": ["""BertModelTester"""],
"""BertLMHeadModel""": ["""BertModelTester"""],
"""BertModel""": ["""BertModelTester"""],
}
        expected_blip_mapping = {
"""BlipForConditionalGeneration""": ["""BlipTextImageModelsModelTester"""],
"""BlipForImageTextRetrieval""": ["""BlipTextRetrievalModelTester"""],
"""BlipForQuestionAnswering""": ["""BlipVQAModelTester"""],
"""BlipModel""": ["""BlipModelTester"""],
"""BlipTextModel""": ["""BlipTextModelTester"""],
"""BlipVisionModel""": ["""BlipVisionModelTester"""],
}
        self.assertEqual(get_test_info.to_json(bert_model_tester_mapping), expected_bert_mapping)
        self.assertEqual(get_test_info.to_json(blip_model_tester_mapping), expected_blip_mapping)
| 359 | from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowercase_ = {
"configuration_blenderbot_small": [
"BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP",
"BlenderbotSmallConfig",
"BlenderbotSmallOnnxConfig",
],
"tokenization_blenderbot_small": ["BlenderbotSmallTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    lowercase_["tokenization_blenderbot_small_fast"] = ["BlenderbotSmallTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    lowercase_["modeling_blenderbot_small"] = [
"BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST",
"BlenderbotSmallForCausalLM",
"BlenderbotSmallForConditionalGeneration",
"BlenderbotSmallModel",
"BlenderbotSmallPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    lowercase_["modeling_tf_blenderbot_small"] = [
"TFBlenderbotSmallForConditionalGeneration",
"TFBlenderbotSmallModel",
"TFBlenderbotSmallPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    lowercase_["modeling_flax_blenderbot_small"] = [
"FlaxBlenderbotSmallForConditionalGeneration",
"FlaxBlenderbotSmallModel",
"FlaxBlenderbotSmallPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotSmallConfig,
BlenderbotSmallOnnxConfig,
)
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_small_fast import BlenderbotSmallTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotSmallForCausalLM,
BlenderbotSmallForConditionalGeneration,
BlenderbotSmallModel,
BlenderbotSmallPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot_small import (
TFBlenderbotSmallForConditionalGeneration,
TFBlenderbotSmallModel,
TFBlenderbotSmallPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot_small import (
FlaxBlenderbotSmallForConditionalGeneration,
FlaxBlenderbotSmallModel,
FlaxBlenderbotSmallPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], lowercase_, module_spec=__spec__)
| 20 | 0 |
from typing import List
from .keymap import KEYMAP, get_character
def mark(key: str):
    """Mark a function with a key code so the KeyHandler register can dispatch to it."""
    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += [key]
        setattr(func, "handle_key", handle)
        return func
    return decorator
def mark_multiple(*keys: str):
    """Mark a function with several key codes at once."""
    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += keys
        setattr(func, "handle_key", handle)
        return func
    return decorator
class KeyHandler(type):
    def __new__(cls, name, bases, attrs):
        new_cls = super().__new__(cls, name, bases, attrs)
        if not hasattr(new_cls, "key_handler"):
            setattr(new_cls, "key_handler", {})
        setattr(new_cls, "handle_input", KeyHandler.handle_input)
        for value in attrs.values():
            handled_keys = getattr(value, "handle_key", [])
            for key in handled_keys:
                new_cls.key_handler[key] = value
        return new_cls

    @staticmethod
    def handle_input(cls):
        """Read one character and dispatch it to the registered handler, if any."""
        char = get_character()
        if char != KEYMAP["undefined"]:
            char = ord(char)
        handler = cls.key_handler.get(char)
        if handler:
            cls.current_selection = char
            return handler(cls)
        else:
            return None
def register(cls):
    """Rebuild the class with the KeyHandler metaclass so @mark-ed methods are wired up."""
    return KeyHandler(cls.__name__, cls.__bases__, cls.__dict__.copy())
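# Usage sketch (hypothetical class; `register` rebuilds it with the KeyHandler metaclass):
#     @register
#     class Menu:
#         @mark("j")
#         def down(cls):
#             ...
#     Menu.handle_input()  # reads one key and dispatches to the matching @mark-ed handler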
| 360 | from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
is_vision_available,
)
lowercase_ = {"configuration_vit": ["VIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTConfig", "ViTOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    lowercase_["feature_extraction_vit"] = ["ViTFeatureExtractor"]
    lowercase_["image_processing_vit"] = ["ViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    lowercase_["modeling_vit"] = [
"VIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"ViTForImageClassification",
"ViTForMaskedImageModeling",
"ViTModel",
"ViTPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    lowercase_["modeling_tf_vit"] = [
"TFViTForImageClassification",
"TFViTModel",
"TFViTPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    lowercase_["modeling_flax_vit"] = [
"FlaxViTForImageClassification",
"FlaxViTModel",
"FlaxViTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_vit import ViTFeatureExtractor
from .image_processing_vit import ViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit import (
VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTForImageClassification,
ViTForMaskedImageModeling,
ViTModel,
ViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], lowercase_, module_spec=__spec__)
| 20 | 0 |
"""simple docstring"""
# DISCLAIMER: This code is strongly influenced by https://github.com/pesser/pytorch_diffusion
# and https://github.com/hojonathanho/diffusion
import math
from dataclasses import dataclass
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.schedulers.scheduling_utils import SchedulerMixin
from diffusers.utils import BaseOutput, deprecate
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->DDIM
class DDIMSchedulerOutput(BaseOutput):
    prev_sample: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None
def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine"):
    """Create a beta schedule that discretizes the given alpha_bar function."""
    if alpha_transform_type == "cosine":
        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2
    elif alpha_transform_type == "exp":
        def alpha_bar_fn(t):
            return math.exp(t * -12.0)
    else:
        raise ValueError(f'''Unsupported alpha_transform_type: {alpha_transform_type}''' )
    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
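# Example (hedged): betas_for_alpha_bar(1000) yields the "squaredcos_cap_v2" (Glide cosine)
# schedule used below; each beta is capped at max_beta so the cumulative alpha never hits zero.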
class DDIMInverseScheduler(SchedulerMixin, ConfigMixin):
    order = 1
@register_to_config
    def __init__(self, num_train_timesteps: int = 1000, beta_start: float = 0.0001, beta_end: float = 0.02, beta_schedule: str = "linear", trained_betas: Optional[Union[np.ndarray, List[float]]] = None, clip_sample: bool = True, set_alpha_to_zero: bool = True, steps_offset: int = 0, prediction_type: str = "epsilon", clip_sample_range: float = 1.0, **kwargs):
        if kwargs.get("set_alpha_to_one", None) is not None:
            deprecation_message = (
                "The `set_alpha_to_one` argument is deprecated. Please use `set_alpha_to_zero` instead."
            )
            deprecate("set_alpha_to_one", "1.0.0", deprecation_message, standard_warn=False)
            set_alpha_to_zero = kwargs["set_alpha_to_one"]
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas, dtype=torch.float32)
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps)
        else:
            raise NotImplementedError(f'''{beta_schedule} is not implemented for {self.__class__}''' )
        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)
        # At every step in inverted ddim, we are looking into the next alphas_cumprod
        # For the final step, there is no next alphas_cumprod, and the index is out of bounds
        # `set_alpha_to_zero` decides whether we set this parameter simply to zero
        # in this case, self.step() just output the predicted noise
        # or whether we use the final alpha of the "non-previous" one.
        self.final_alpha_cumprod = torch.tensor(0.0) if set_alpha_to_zero else self.alphas_cumprod[-1]
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0
        # setable values
        self.num_inference_steps = None
        self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps).copy().astype(np.int64))
    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        return sample
    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        if num_inference_steps > self.config.num_train_timesteps:
            raise ValueError(
                f'''`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`:'''
                f''' {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle'''
                f''' maximal {self.config.num_train_timesteps} timesteps.''' )
        self.num_inference_steps = num_inference_steps
        step_ratio = self.config.num_train_timesteps // self.num_inference_steps
        # creates integer timesteps by multiplying by ratio
        # casting to int to avoid issues when num_inference_step is power of 3
        timesteps = (np.arange(0, num_inference_steps) * step_ratio).round().copy().astype(np.int64)
        self.timesteps = torch.from_numpy(timesteps).to(device)
        self.timesteps += self.config.steps_offset
    def step(self, model_output: torch.FloatTensor, timestep: int, sample: torch.FloatTensor, eta: float = 0.0, use_clipped_model_output: bool = False, variance_noise: Optional[torch.FloatTensor] = None, return_dict: bool = True):
        # 1. get previous step value (=t+1)
        prev_timestep = timestep + self.config.num_train_timesteps // self.num_inference_steps
        # 2. compute alphas, betas
        # change original implementation to exactly match noise levels for analogous forward process
        alpha_prod_t = self.alphas_cumprod[timestep]
        alpha_prod_t_prev = (
            self.alphas_cumprod[prev_timestep]
            if prev_timestep < self.config.num_train_timesteps
            else self.final_alpha_cumprod
        )
        beta_prod_t = 1 - alpha_prod_t
        # 3. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t**0.5 * model_output) / alpha_prod_t**0.5
            pred_epsilon = model_output
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
            pred_epsilon = (sample - alpha_prod_t**0.5 * pred_original_sample) / beta_prod_t**0.5
        elif self.config.prediction_type == "v_prediction":
            pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
            pred_epsilon = (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample
        else:
            raise ValueError(
                f'''prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or'''
                """ `v_prediction`""" )
        # 4. Clip or threshold "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = pred_original_sample.clamp(
                -self.config.clip_sample_range, self.config.clip_sample_range
            )
        # 5. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        pred_sample_direction = (1 - alpha_prod_t_prev) ** 0.5 * pred_epsilon
        # 6. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        prev_sample = alpha_prod_t_prev**0.5 * pred_original_sample + pred_sample_direction
        if not return_dict:
            return (prev_sample, pred_original_sample)
        return DDIMSchedulerOutput(prev_sample=prev_sample, pred_original_sample=pred_original_sample)
def __len__( self : int ):
return self.config.num_train_timesteps
| 361 | from __future__ import annotations
def comp_and_swap(array: list[int], index1: int, index2: int, direction: int) -> None:
    """Swap array[index1] and array[index2] if they violate the requested direction."""
    if (direction == 1 and array[index1] > array[index2]) or (
        direction == 0 and array[index1] < array[index2]
    ):
        array[index1], array[index2] = array[index2], array[index1]


def bitonic_merge(array: list[int], low: int, length: int, direction: int) -> None:
    """Recursively merge the bitonic sequence of `length` elements starting at `low`."""
    if length > 1:
        middle = int(length / 2)
        for i in range(low, low + middle):
            comp_and_swap(array, i, i + middle, direction)
        bitonic_merge(array, low, middle, direction)
        bitonic_merge(array, low + middle, middle, direction)


def bitonic_sort(array: list[int], low: int, length: int, direction: int) -> None:
    """Sort `length` elements starting at `low`; direction 1 = ascending, 0 = descending."""
    if length > 1:
        middle = int(length / 2)
        bitonic_sort(array, low, middle, 1)
        bitonic_sort(array, low + middle, middle, 0)
        bitonic_merge(array, low, length, direction)
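# Note: a bitonic network only sorts inputs whose length is a power of two, and it performs
# O(n log^2 n) comparisons overall. Example: bitonic_sort(arr, 0, len(arr), 1) sorts `arr`
# ascending in place; direction 0 sorts descending.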
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item.strip()) for item in user_input.split(",")]
bitonic_sort(unsorted, 0, len(unsorted), 1)
print("\nSorted array in ascending order is: ", end="")
print(*unsorted, sep=", ")
bitonic_merge(unsorted, 0, len(unsorted), 0)
print("Sorted array in descending order is: ", end="")
print(*unsorted, sep=", ")
| 20 | 0 |
"""simple docstring"""
import argparse
import os
import torch
from transformers import FlavaConfig, FlavaForPreTraining
from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint
def count_parameters(state_dict):
    # encoder.embeddings are double copied in the original FLAVA checkpoints, so skip them
    return sum(param.float().sum() if "encoder.embeddings" not in key else 0 for key, param in state_dict.items())
def upgrade_state_dict(state_dict, codebook_state_dict):
    """Rename the keys of an original FLAVA checkpoint to the transformers naming scheme."""
    upgrade = {}
    for key, value in state_dict.items():
        if "text_encoder.embeddings" in key or "image_encoder.embeddings" in key:
            continue
        key = key.replace("heads.cmd.mim_head.cls.predictions", "mmm_image_head")
        key = key.replace("heads.cmd.mlm_head.cls.predictions", "mmm_text_head")
        key = key.replace("heads.cmd.itm_head.cls", "itm_head")
        key = key.replace("heads.cmd.itm_head.pooler", "itm_head.pooler")
        key = key.replace("heads.cmd.clip_head.logit_scale", "flava.logit_scale")
        key = key.replace("heads.fairseq_mlm.cls.predictions", "mlm_head")
        key = key.replace("heads.imagenet.mim_head.cls.predictions", "mim_head")
        key = key.replace("mm_text_projection", "flava.text_to_mm_projection")
        key = key.replace("mm_image_projection", "flava.image_to_mm_projection")
        key = key.replace("image_encoder.module", "flava.image_model")
        key = key.replace("text_encoder.module", "flava.text_model")
        key = key.replace("mm_encoder.module.encoder.cls_token", "flava.multimodal_model.cls_token")
        key = key.replace("mm_encoder.module", "flava.multimodal_model")
        key = key.replace("text_projection", "flava.text_projection")
        key = key.replace("image_projection", "flava.image_projection")
        upgrade[key] = value.float()
    for key, value in codebook_state_dict.items():
        upgrade[f'''image_codebook.{key}'''] = value
    return upgrade
@torch.no_grad()
def __lowerCAmelCase ( __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Any=None ):
'''simple docstring'''
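# build the HF model, upgrade the original checkpoint's keys to the HF layout, and verify parameter sums before saving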
if config_path is not None:
__snake_case : Tuple = FlavaConfig.from_pretrained(__SCREAMING_SNAKE_CASE )
else:
__snake_case : Dict = FlavaConfig()
__snake_case : List[Any] = FlavaForPreTraining(__SCREAMING_SNAKE_CASE ).eval()
__snake_case : int = convert_dalle_checkpoint(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , save_checkpoint=__SCREAMING_SNAKE_CASE )
if os.path.exists(__SCREAMING_SNAKE_CASE ):
__snake_case : List[Any] = torch.load(__SCREAMING_SNAKE_CASE , map_location="""cpu""" )
else:
__snake_case : Dict = torch.hub.load_state_dict_from_url(__SCREAMING_SNAKE_CASE , map_location="""cpu""" )
__snake_case : Dict = upgrade_state_dict(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
hf_model.load_state_dict(__SCREAMING_SNAKE_CASE )
__snake_case : Tuple = hf_model.state_dict()
__snake_case : Union[str, Any] = count_parameters(__SCREAMING_SNAKE_CASE )
__snake_case : Dict = count_parameters(__SCREAMING_SNAKE_CASE ) + count_parameters(__SCREAMING_SNAKE_CASE )
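# sanity check: the parameter checksum of the converted model must match the original model plus codebook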
assert torch.allclose(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , atol=1E-3 )
hf_model.save_pretrained(__SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to flava checkpoint")
parser.add_argument("--codebook_path", default=None, type=str, help="Path to flava codebook checkpoint")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
lowercase_ = parser.parse_args()
convert_flava_checkpoint(args.checkpoint_path, args.codebook_path, args.pytorch_dump_folder_path, args.config_path)
| 362 | import os
import zipfile
import pytest
from datasets.utils.extract import (
BzipaExtractor,
Extractor,
GzipExtractor,
LzaExtractor,
SevenZipExtractor,
TarExtractor,
XzExtractor,
ZipExtractor,
ZstdExtractor,
)
from .utils import require_lza, require_pyazr, require_zstandard
@pytest.mark.parametrize(
"""compression_format, is_archive""" , [
("""7z""", True),
("""bz2""", False),
("""gzip""", False),
("""lz4""", False),
("""tar""", True),
("""xz""", False),
("""zip""", True),
("""zstd""", False),
] , )
def __lowerCAmelCase ( __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Optional[Any] , ):
'''simple docstring'''
__snake_case : Optional[int] = {
"""7z""": (seven_zip_file, SevenZipExtractor),
"""bz2""": (bza_file, BzipaExtractor),
"""gzip""": (gz_file, GzipExtractor),
"""lz4""": (lza_file, LzaExtractor),
"""tar""": (tar_file, TarExtractor),
"""xz""": (xz_file, XzExtractor),
"""zip""": (zip_file, ZipExtractor),
"""zstd""": (zstd_file, ZstdExtractor),
}
__snake_case , __snake_case : Tuple = input_paths_and_base_extractors[compression_format]
if input_path is None:
__snake_case : Tuple = F'''for \'{compression_format}\' compression_format, '''
if compression_format == "7z":
reason += require_pyazr.kwargs["reason"]
elif compression_format == "lz4":
reason += require_lza.kwargs["reason"]
elif compression_format == "zstd":
reason += require_zstandard.kwargs["reason"]
pytest.skip(__SCREAMING_SNAKE_CASE )
assert base_extractor.is_extractable(__SCREAMING_SNAKE_CASE )
__snake_case : List[str] = tmp_path / ("""extracted""" if is_archive else """extracted.txt""")
base_extractor.extract(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
if is_archive:
assert output_path.is_dir()
for file_path in output_path.iterdir():
assert file_path.name == text_file.name
__snake_case : List[str] = file_path.read_text(encoding="""utf-8""" )
else:
__snake_case : Optional[Any] = output_path.read_text(encoding="""utf-8""" )
__snake_case : int = text_file.read_text(encoding="""utf-8""" )
assert extracted_file_content == expected_file_content
@pytest.mark.parametrize(
"""compression_format, is_archive""" , [
("""7z""", True),
("""bz2""", False),
("""gzip""", False),
("""lz4""", False),
("""tar""", True),
("""xz""", False),
("""zip""", True),
("""zstd""", False),
] , )
def __lowerCAmelCase ( __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Union[str, Any] , ):
'''simple docstring'''
__snake_case : Union[str, Any] = {
"""7z""": seven_zip_file,
"""bz2""": bza_file,
"""gzip""": gz_file,
"""lz4""": lza_file,
"""tar""": tar_file,
"""xz""": xz_file,
"""zip""": zip_file,
"""zstd""": zstd_file,
}
__snake_case : int = input_paths[compression_format]
if input_path is None:
__snake_case : int = F'''for \'{compression_format}\' compression_format, '''
if compression_format == "7z":
reason += require_pyazr.kwargs["reason"]
elif compression_format == "lz4":
reason += require_lza.kwargs["reason"]
elif compression_format == "zstd":
reason += require_zstandard.kwargs["reason"]
pytest.skip(__SCREAMING_SNAKE_CASE )
__snake_case : Any = Extractor.infer_extractor_format(__SCREAMING_SNAKE_CASE )
assert extractor_format is not None
__snake_case : Tuple = tmp_path / ("""extracted""" if is_archive else """extracted.txt""")
Extractor.extract(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
if is_archive:
assert output_path.is_dir()
for file_path in output_path.iterdir():
assert file_path.name == text_file.name
__snake_case : Union[str, Any] = file_path.read_text(encoding="""utf-8""" )
else:
__snake_case : Union[str, Any] = output_path.read_text(encoding="""utf-8""" )
__snake_case : Optional[Any] = text_file.read_text(encoding="""utf-8""" )
assert extracted_file_content == expected_file_content
@pytest.fixture
def __lowerCAmelCase ( __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Any ):
'''simple docstring'''
import tarfile
__snake_case : List[str] = tmp_path / """data_dot_dot"""
directory.mkdir()
__snake_case : Optional[Any] = directory / """tar_file_with_dot_dot.tar"""
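# build a tar archive whose member path escapes the extraction directory via ".."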
with tarfile.TarFile(__SCREAMING_SNAKE_CASE , """w""" ) as f:
f.add(__SCREAMING_SNAKE_CASE , arcname=os.path.join("""..""" , text_file.name ) )
return path
@pytest.fixture
def __lowerCAmelCase ( __SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
import tarfile
__snake_case : Dict = tmp_path / """data_sym_link"""
directory.mkdir()
__snake_case : Tuple = directory / """tar_file_with_sym_link.tar"""
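# build a tar archive containing a symlink that points outside the archive root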
os.symlink("""..""" , directory / """subdir""" , target_is_directory=__SCREAMING_SNAKE_CASE )
with tarfile.TarFile(__SCREAMING_SNAKE_CASE , """w""" ) as f:
f.add(str(directory / """subdir""" ) , arcname="""subdir""" ) # str required by os.readlink on Windows and Python < 3.8
return path
@pytest.mark.parametrize(
"""insecure_tar_file, error_log""" , [("""tar_file_with_dot_dot""", """illegal path"""), ("""tar_file_with_sym_link""", """Symlink""")] , )
def __lowerCAmelCase ( __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Optional[Any] ):
'''simple docstring'''
__snake_case : Any = {
"""tar_file_with_dot_dot""": tar_file_with_dot_dot,
"""tar_file_with_sym_link""": tar_file_with_sym_link,
}
__snake_case : int = insecure_tar_files[insecure_tar_file]
__snake_case : Optional[int] = tmp_path / """extracted"""
TarExtractor.extract(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
assert caplog.text
for record in caplog.records:
assert record.levelname == "ERROR"
assert error_log in record.msg
def __lowerCAmelCase ( __SCREAMING_SNAKE_CASE : Dict ):
'''simple docstring'''
# We should have fewer false positives than zipfile.is_zipfile
# We do that by checking only the magic number
__snake_case : Optional[Any] = tmpdir / """not_a_zip_file"""
# From: https://github.com/python/cpython/pull/5053
__snake_case : List[str] = (
b"""\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x01\x00\x00"""
b"""\x00\x02\x08\x06\x00\x00\x00\x99\x81\xb6'\x00\x00\x00\x15I"""
b"""DATx\x01\x01\n\x00\xf5\xff\x00PK\x05\x06\x00PK\x06\x06\x07"""
b"""\xac\x01N\xc6|a\r\x00\x00\x00\x00IEND\xaeB`\x82"""
)
with not_a_zip_file.open("""wb""" ) as f:
f.write(__SCREAMING_SNAKE_CASE )
assert zipfile.is_zipfile(str(__SCREAMING_SNAKE_CASE ) ) # is a false positive for `zipfile`
assert not ZipExtractor.is_extractable(__SCREAMING_SNAKE_CASE ) # but we're right
| 20 | 0 |
import multiprocessing
import os
from typing import BinaryIO, Optional, Union
import fsspec
from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase ):
def __init__( self : List[Any] , _lowerCAmelCase : NestedDataStructureLike[PathLike] , _lowerCAmelCase : Optional[NamedSplit] = None , _lowerCAmelCase : Optional[Features] = None , _lowerCAmelCase : str = None , _lowerCAmelCase : bool = False , _lowerCAmelCase : bool = False , _lowerCAmelCase : Optional[str] = None , _lowerCAmelCase : Optional[int] = None , **_lowerCAmelCase : List[Any] , ):
super().__init__(
_lowerCAmelCase , split=_lowerCAmelCase , features=_lowerCAmelCase , cache_dir=_lowerCAmelCase , keep_in_memory=_lowerCAmelCase , streaming=_lowerCAmelCase , num_proc=_lowerCAmelCase , **_lowerCAmelCase , )
__snake_case : List[Any] = field
__snake_case : Union[str, Any] = path_or_paths if isinstance(_lowerCAmelCase , _lowerCAmelCase ) else {self.split: path_or_paths}
__snake_case : List[str] = Json(
cache_dir=_lowerCAmelCase , data_files=_lowerCAmelCase , features=_lowerCAmelCase , field=_lowerCAmelCase , **_lowerCAmelCase , )
def lowerCAmelCase__ ( self : Optional[int] ):
# Build iterable dataset
if self.streaming:
__snake_case : Optional[Any] = self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
__snake_case : Union[str, Any] = None
__snake_case : Optional[int] = None
__snake_case : List[Any] = None
__snake_case : List[Any] = None
self.builder.download_and_prepare(
download_config=_lowerCAmelCase , download_mode=_lowerCAmelCase , verification_mode=_lowerCAmelCase , base_path=_lowerCAmelCase , num_proc=self.num_proc , )
__snake_case : Optional[Any] = self.builder.as_dataset(
split=self.split , verification_mode=_lowerCAmelCase , in_memory=self.keep_in_memory )
return dataset
class SCREAMING_SNAKE_CASE__ :
def __init__( self : Dict , _lowerCAmelCase : Dataset , _lowerCAmelCase : Union[PathLike, BinaryIO] , _lowerCAmelCase : Optional[int] = None , _lowerCAmelCase : Optional[int] = None , **_lowerCAmelCase : List[str] , ):
if num_proc is not None and num_proc <= 0:
raise ValueError(f'''num_proc {num_proc} must be an integer > 0.''' )
__snake_case : List[str] = dataset
__snake_case : Optional[Any] = path_or_buf
__snake_case : Tuple = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
__snake_case : Union[str, Any] = num_proc
__snake_case : Any = """utf-8"""
__snake_case : List[Any] = to_json_kwargs
def lowerCAmelCase__ ( self : Union[str, Any] ):
__snake_case : int = self.to_json_kwargs.pop("""path_or_buf""" , _lowerCAmelCase )
__snake_case : Tuple = self.to_json_kwargs.pop("""orient""" , """records""" )
__snake_case : List[Any] = self.to_json_kwargs.pop("""lines""" , True if orient == """records""" else False )
__snake_case : List[str] = self.to_json_kwargs.pop("""index""" , False if orient in ["""split""", """table"""] else True )
__snake_case : Dict = self.to_json_kwargs.pop("""compression""" , _lowerCAmelCase )
if compression not in [None, "infer", "gzip", "bz2", "xz"]:
raise NotImplementedError(f'''`datasets` currently does not support {compression} compression''' )
if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ):
with fsspec.open(self.path_or_buf , """wb""" , compression=_lowerCAmelCase ) as buffer:
__snake_case : str = self._write(file_obj=_lowerCAmelCase , orient=_lowerCAmelCase , lines=_lowerCAmelCase , index=_lowerCAmelCase , **self.to_json_kwargs )
else:
if compression:
raise NotImplementedError(
f'''The compression parameter is not supported when writing to a buffer, but compression={compression}'''
""" was passed. Please provide a local path instead.""" )
__snake_case : Union[str, Any] = self._write(
file_obj=self.path_or_buf , orient=_lowerCAmelCase , lines=_lowerCAmelCase , index=_lowerCAmelCase , **self.to_json_kwargs )
return written
def lowerCAmelCase__ ( self : Optional[int] , _lowerCAmelCase : List[Any] ):
__snake_case : List[str] = args
__snake_case : Tuple = query_table(
table=self.dataset.data , key=slice(_lowerCAmelCase , offset + self.batch_size ) , indices=self.dataset._indices , )
__snake_case : Optional[int] = batch.to_pandas().to_json(
path_or_buf=_lowerCAmelCase , orient=_lowerCAmelCase , lines=_lowerCAmelCase , index=_lowerCAmelCase , **_lowerCAmelCase )
if not json_str.endswith("""\n""" ):
json_str += "\n"
return json_str.encode(self.encoding )
def lowerCAmelCase__ ( self : int , _lowerCAmelCase : BinaryIO , _lowerCAmelCase : Any , _lowerCAmelCase : Tuple , _lowerCAmelCase : List[str] , **_lowerCAmelCase : List[str] , ):
__snake_case : List[Any] = 0
if self.num_proc is None or self.num_proc == 1:
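# single-process path: encode and write one batch at a time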
for offset in logging.tqdm(
range(0 , len(self.dataset ) , self.batch_size ) , unit="""ba""" , disable=not logging.is_progress_bar_enabled() , desc="""Creating json from Arrow format""" , ):
__snake_case : Any = self._batch_json((offset, orient, lines, index, to_json_kwargs) )
written += file_obj.write(_lowerCAmelCase )
else:
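# multi-process path: pool.imap encodes batches in parallel and yields them in input order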
__snake_case , __snake_case : str = len(self.dataset ), self.batch_size
with multiprocessing.Pool(self.num_proc ) as pool:
for json_str in logging.tqdm(
pool.imap(
self._batch_json , [(offset, orient, lines, index, to_json_kwargs) for offset in range(0 , _lowerCAmelCase , _lowerCAmelCase )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit="""ba""" , disable=not logging.is_progress_bar_enabled() , desc="""Creating json from Arrow format""" , ):
written += file_obj.write(_lowerCAmelCase )
return written
| 363 | import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
@require_pyctcdecode
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def snake_case__ ( self : Any ):
__snake_case : Dict = """| <pad> <unk> <s> </s> a b c d e f g h i j k""".split()
__snake_case : str = dict(zip(_lowerCAmelCase , range(len(_lowerCAmelCase ) ) ) )
__snake_case : List[str] = {
"""unk_token""": """<unk>""",
"""bos_token""": """<s>""",
"""eos_token""": """</s>""",
}
__snake_case : str = {
"""feature_size""": 1,
"""padding_value""": 0.0,
"""sampling_rate""": 1_60_00,
"""return_attention_mask""": False,
"""do_normalize""": True,
}
__snake_case : Optional[Any] = tempfile.mkdtemp()
__snake_case : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
__snake_case : Any = os.path.join(self.tmpdirname , _lowerCAmelCase )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(_lowerCAmelCase ) + """\n""" )
with open(self.feature_extraction_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(_lowerCAmelCase ) + """\n""" )
# load decoder from hub
__snake_case : Optional[int] = """hf-internal-testing/ngram-beam-search-decoder"""
def snake_case__ ( self : Optional[Any] , **_lowerCAmelCase : Tuple ):
__snake_case : int = self.add_kwargs_tokens_map.copy()
kwargs.update(_lowerCAmelCase )
return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname , **_lowerCAmelCase )
def snake_case__ ( self : Union[str, Any] , **_lowerCAmelCase : Optional[int] ):
return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname , **_lowerCAmelCase )
def snake_case__ ( self : Dict , **_lowerCAmelCase : Tuple ):
return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name , **_lowerCAmelCase )
def snake_case__ ( self : List[str] ):
shutil.rmtree(self.tmpdirname )
def snake_case__ ( self : Union[str, Any] ):
__snake_case : Union[str, Any] = self.get_tokenizer()
__snake_case : Tuple = self.get_feature_extractor()
__snake_case : Dict = self.get_decoder()
__snake_case : List[str] = WavaVecaProcessorWithLM(tokenizer=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , decoder=_lowerCAmelCase )
processor.save_pretrained(self.tmpdirname )
__snake_case : Dict = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname )
# tokenizer
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , _lowerCAmelCase )
# feature extractor
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , _lowerCAmelCase )
# decoder
self.assertEqual(processor.decoder._alphabet.labels , decoder._alphabet.labels )
self.assertEqual(
processor.decoder.model_container[decoder._model_key]._unigram_set , decoder.model_container[decoder._model_key]._unigram_set , )
self.assertIsInstance(processor.decoder , _lowerCAmelCase )
def snake_case__ ( self : Tuple ):
__snake_case : Tuple = WavaVecaProcessorWithLM(
tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
processor.save_pretrained(self.tmpdirname )
# reload the processor with decoder parameters overridden via kwargs
__snake_case : Optional[Any] = WavaVecaProcessorWithLM.from_pretrained(
self.tmpdirname , alpha=5.0 , beta=3.0 , score_boundary=-7.0 , unk_score_offset=3 )
# decoder
self.assertEqual(processor.language_model.alpha , 5.0 )
self.assertEqual(processor.language_model.beta , 3.0 )
self.assertEqual(processor.language_model.score_boundary , -7.0 )
self.assertEqual(processor.language_model.unk_score_offset , 3 )
def snake_case__ ( self : int ):
__snake_case : Tuple = self.get_tokenizer()
# add token to trigger raise
tokenizer.add_tokens(["""xx"""] )
with self.assertRaisesRegex(_lowerCAmelCase , """include""" ):
WavaVecaProcessorWithLM(
tokenizer=_lowerCAmelCase , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
def snake_case__ ( self : Dict ):
__snake_case : int = self.get_feature_extractor()
__snake_case : str = self.get_tokenizer()
__snake_case : Dict = self.get_decoder()
__snake_case : Any = WavaVecaProcessorWithLM(tokenizer=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , decoder=_lowerCAmelCase )
__snake_case : List[Any] = floats_list((3, 10_00) )
__snake_case : Optional[Any] = feature_extractor(_lowerCAmelCase , return_tensors="""np""" )
__snake_case : Tuple = processor(_lowerCAmelCase , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def snake_case__ ( self : Optional[int] ):
__snake_case : Any = self.get_feature_extractor()
__snake_case : Union[str, Any] = self.get_tokenizer()
__snake_case : int = self.get_decoder()
__snake_case : Optional[Any] = WavaVecaProcessorWithLM(tokenizer=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , decoder=_lowerCAmelCase )
__snake_case : Optional[int] = """This is a test string"""
__snake_case : Union[str, Any] = processor(text=_lowerCAmelCase )
__snake_case : Dict = tokenizer(_lowerCAmelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def snake_case__ ( self : List[str] , _lowerCAmelCase : List[Any]=(2, 10, 16) , _lowerCAmelCase : str=77 ):
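# seeded random logits that stand in for real model output in the decoder tests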
np.random.seed(_lowerCAmelCase )
return np.random.rand(*_lowerCAmelCase )
def snake_case__ ( self : Tuple ):
__snake_case : List[str] = self.get_feature_extractor()
__snake_case : List[str] = self.get_tokenizer()
__snake_case : List[str] = self.get_decoder()
__snake_case : Optional[Any] = WavaVecaProcessorWithLM(tokenizer=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , decoder=_lowerCAmelCase )
__snake_case : Optional[int] = self._get_dummy_logits(shape=(10, 16) , seed=13 )
__snake_case : int = processor.decode(_lowerCAmelCase )
__snake_case : Optional[int] = decoder.decode_beams(_lowerCAmelCase )[0]
self.assertEqual(decoded_decoder[0] , decoded_processor.text )
self.assertEqual("""</s> <s> </s>""" , decoded_processor.text )
self.assertEqual(decoded_decoder[-2] , decoded_processor.logit_score )
self.assertEqual(decoded_decoder[-1] , decoded_processor.lm_score )
@parameterized.expand([[None], ["""fork"""], ["""spawn"""]] )
def snake_case__ ( self : List[str] , _lowerCAmelCase : List[str] ):
__snake_case : int = self.get_feature_extractor()
__snake_case : Union[str, Any] = self.get_tokenizer()
__snake_case : int = self.get_decoder()
__snake_case : Dict = WavaVecaProcessorWithLM(tokenizer=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , decoder=_lowerCAmelCase )
__snake_case : int = self._get_dummy_logits()
# note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
# otherwise, the LM won't be available to the pool's sub-processes.
# manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
if pool_context is None:
__snake_case : Tuple = processor.batch_decode(_lowerCAmelCase )
else:
with get_context(_lowerCAmelCase ).Pool() as pool:
__snake_case : int = processor.batch_decode(_lowerCAmelCase , _lowerCAmelCase )
__snake_case : int = list(_lowerCAmelCase )
with get_context("""fork""" ).Pool() as p:
__snake_case : Tuple = decoder.decode_beams_batch(_lowerCAmelCase , _lowerCAmelCase )
__snake_case , __snake_case , __snake_case : List[Any] = [], [], []
for beams in decoded_beams:
texts_decoder.append(beams[0][0] )
logit_scores_decoder.append(beams[0][-2] )
lm_scores_decoder.append(beams[0][-1] )
self.assertListEqual(_lowerCAmelCase , decoded_processor.text )
self.assertListEqual(["""<s> <s> </s>""", """<s> <s> <s>"""] , decoded_processor.text )
self.assertListEqual(_lowerCAmelCase , decoded_processor.logit_score )
self.assertListEqual(_lowerCAmelCase , decoded_processor.lm_score )
def snake_case__ ( self : Optional[int] ):
__snake_case : Optional[Any] = self.get_feature_extractor()
__snake_case : int = self.get_tokenizer()
__snake_case : str = self.get_decoder()
__snake_case : Optional[Any] = WavaVecaProcessorWithLM(tokenizer=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , decoder=_lowerCAmelCase )
__snake_case : int = self._get_dummy_logits()
__snake_case : List[str] = 15
__snake_case : Optional[Any] = -20.0
__snake_case : Tuple = -4.0
__snake_case : List[Any] = processor.batch_decode(
_lowerCAmelCase , beam_width=_lowerCAmelCase , beam_prune_logp=_lowerCAmelCase , token_min_logp=_lowerCAmelCase , )
__snake_case : List[str] = decoded_processor_out.text
__snake_case : str = list(_lowerCAmelCase )
with get_context("""fork""" ).Pool() as pool:
__snake_case : Dict = decoder.decode_beams_batch(
_lowerCAmelCase , _lowerCAmelCase , beam_width=_lowerCAmelCase , beam_prune_logp=_lowerCAmelCase , token_min_logp=_lowerCAmelCase , )
__snake_case : int = [d[0][0] for d in decoded_decoder_out]
__snake_case : List[Any] = [d[0][2] for d in decoded_decoder_out]
__snake_case : List[Any] = [d[0][3] for d in decoded_decoder_out]
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
self.assertListEqual(["""</s> <s> <s>""", """<s> <s> <s>"""] , _lowerCAmelCase )
self.assertTrue(np.array_equal(_lowerCAmelCase , decoded_processor_out.logit_score ) )
self.assertTrue(np.allclose([-20.054, -18.447] , _lowerCAmelCase , atol=1e-3 ) )
self.assertTrue(np.array_equal(_lowerCAmelCase , decoded_processor_out.lm_score ) )
self.assertTrue(np.allclose([-15.554, -13.9474] , _lowerCAmelCase , atol=1e-3 ) )
def snake_case__ ( self : Any ):
__snake_case : List[Any] = self.get_feature_extractor()
__snake_case : Any = self.get_tokenizer()
__snake_case : Union[str, Any] = self.get_decoder()
__snake_case : Dict = WavaVecaProcessorWithLM(tokenizer=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , decoder=_lowerCAmelCase )
__snake_case : Any = self._get_dummy_logits()
__snake_case : Any = 2.0
__snake_case : int = 5.0
__snake_case : Optional[int] = -20.0
__snake_case : Optional[int] = True
__snake_case : Any = processor.batch_decode(
_lowerCAmelCase , alpha=_lowerCAmelCase , beta=_lowerCAmelCase , unk_score_offset=_lowerCAmelCase , lm_score_boundary=_lowerCAmelCase , )
__snake_case : str = decoded_processor_out.text
__snake_case : int = list(_lowerCAmelCase )
decoder.reset_params(
alpha=_lowerCAmelCase , beta=_lowerCAmelCase , unk_score_offset=_lowerCAmelCase , lm_score_boundary=_lowerCAmelCase , )
with get_context("""fork""" ).Pool() as pool:
__snake_case : Tuple = decoder.decode_beams_batch(
_lowerCAmelCase , _lowerCAmelCase , )
__snake_case : int = [d[0][0] for d in decoded_decoder_out]
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
self.assertListEqual(["""<s> </s> <s> </s> </s>""", """</s> </s> <s> </s> </s>"""] , _lowerCAmelCase )
__snake_case : List[str] = processor.decoder.model_container[processor.decoder._model_key]
self.assertEqual(lm_model.alpha , 2.0 )
self.assertEqual(lm_model.beta , 5.0 )
self.assertEqual(lm_model.unk_score_offset , -20.0 )
self.assertEqual(lm_model.score_boundary , _lowerCAmelCase )
def snake_case__ ( self : Dict ):
__snake_case : Optional[Any] = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
__snake_case : Union[str, Any] = processor.decoder.model_container[processor.decoder._model_key]
__snake_case : Optional[int] = Path(language_model._kenlm_model.path.decode("""utf-8""" ) ).parent.parent.absolute()
__snake_case : Union[str, Any] = os.listdir(_lowerCAmelCase )
__snake_case : List[str] = ["""alphabet.json""", """language_model"""]
downloaded_decoder_files.sort()
expected_decoder_files.sort()
# test that only decoder relevant files from
# https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
# are downloaded and none of the rest (e.g. README.md, ...)
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
def snake_case__ ( self : Optional[Any] ):
__snake_case : Union[str, Any] = snapshot_download("""hf-internal-testing/processor_with_lm""" )
__snake_case : Dict = WavaVecaProcessorWithLM.from_pretrained(_lowerCAmelCase )
__snake_case : Optional[Any] = processor.decoder.model_container[processor.decoder._model_key]
__snake_case : Optional[int] = Path(language_model._kenlm_model.path.decode("""utf-8""" ) ).parent.parent.absolute()
__snake_case : List[str] = os.listdir(_lowerCAmelCase )
__snake_case : List[Any] = os.listdir(_lowerCAmelCase )
local_decoder_files.sort()
expected_decoder_files.sort()
# test that both the decoder from the hub and the local files in the cache are the same
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
def snake_case__ ( self : Optional[Any] ):
__snake_case : Optional[int] = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
__snake_case : str = AutoProcessor.from_pretrained("""hf-internal-testing/processor_with_lm""" )
__snake_case : Optional[int] = floats_list((3, 10_00) )
__snake_case : Union[str, Any] = processor_wavaveca(_lowerCAmelCase , return_tensors="""np""" )
__snake_case : Union[str, Any] = processor_auto(_lowerCAmelCase , return_tensors="""np""" )
for key in input_wavaveca.keys():
self.assertAlmostEqual(input_wavaveca[key].sum() , input_auto[key].sum() , delta=1e-2 )
__snake_case : Dict = self._get_dummy_logits()
__snake_case : List[Any] = processor_wavaveca.batch_decode(_lowerCAmelCase )
__snake_case : List[Any] = processor_auto.batch_decode(_lowerCAmelCase )
self.assertListEqual(decoded_wavaveca.text , decoded_auto.text )
def snake_case__ ( self : str ):
__snake_case : int = self.get_feature_extractor()
__snake_case : List[str] = self.get_tokenizer()
__snake_case : Optional[Any] = self.get_decoder()
__snake_case : Union[str, Any] = WavaVecaProcessorWithLM(tokenizer=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , decoder=_lowerCAmelCase )
self.assertListEqual(
processor.model_input_names , feature_extractor.model_input_names , msg="""`processor` and `feature_extractor` model input names do not match""" , )
@staticmethod
def snake_case__ ( _lowerCAmelCase : Any , _lowerCAmelCase : Tuple ):
__snake_case : Union[str, Any] = [d[key] for d in offsets]
return retrieved_list
def snake_case__ ( self : Dict ):
__snake_case : int = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
__snake_case : List[str] = self._get_dummy_logits()[0]
__snake_case : str = processor.decode(_lowerCAmelCase , output_word_offsets=_lowerCAmelCase )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue("""text""" in outputs )
self.assertTrue("""word_offsets""" in outputs )
self.assertTrue(isinstance(_lowerCAmelCase , _lowerCAmelCase ) )
self.assertEqual(""" """.join(self.get_from_offsets(outputs["""word_offsets"""] , """word""" ) ) , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""] , """word""" ) , ["""<s>""", """<s>""", """</s>"""] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""] , """start_offset""" ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""] , """end_offset""" ) , [1, 3, 5] )
def snake_case__ ( self : List[str] ):
__snake_case : Any = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
__snake_case : Optional[int] = self._get_dummy_logits()
__snake_case : int = processor.batch_decode(_lowerCAmelCase , output_word_offsets=_lowerCAmelCase )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue("""text""" in outputs )
self.assertTrue("""word_offsets""" in outputs )
self.assertTrue(isinstance(_lowerCAmelCase , _lowerCAmelCase ) )
self.assertListEqual(
[""" """.join(self.get_from_offsets(_lowerCAmelCase , """word""" ) ) for o in outputs["""word_offsets"""]] , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""][0] , """word""" ) , ["""<s>""", """<s>""", """</s>"""] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""][0] , """start_offset""" ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""][0] , """end_offset""" ) , [1, 3, 5] )
@slow
@require_torch
@require_torchaudio
def snake_case__ ( self : Optional[Any] ):
import torch
__snake_case : Optional[Any] = load_dataset("""common_voice""" , """en""" , split="""train""" , streaming=_lowerCAmelCase )
__snake_case : Any = ds.cast_column("""audio""" , datasets.Audio(sampling_rate=1_60_00 ) )
__snake_case : List[Any] = iter(_lowerCAmelCase )
__snake_case : Optional[int] = next(_lowerCAmelCase )
__snake_case : str = AutoProcessor.from_pretrained("""patrickvonplaten/wav2vec2-base-100h-with-lm""" )
__snake_case : str = WavaVecaForCTC.from_pretrained("""patrickvonplaten/wav2vec2-base-100h-with-lm""" )
# compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
__snake_case : List[str] = processor(sample["""audio"""]["""array"""] , return_tensors="""pt""" ).input_values
with torch.no_grad():
__snake_case : Dict = model(_lowerCAmelCase ).logits.cpu().numpy()
__snake_case : Any = processor.decode(logits[0] , output_word_offsets=_lowerCAmelCase )
__snake_case : Optional[Any] = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
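# multiplying frame offsets by this factor converts them to seconds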
__snake_case : Dict = [
{
"""start_time""": d["""start_offset"""] * time_offset,
"""end_time""": d["""end_offset"""] * time_offset,
"""word""": d["""word"""],
}
for d in output["""word_offsets"""]
]
__snake_case : Dict = """WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL"""
# output words
self.assertEqual(""" """.join(self.get_from_offsets(_lowerCAmelCase , """word""" ) ) , _lowerCAmelCase )
self.assertEqual(""" """.join(self.get_from_offsets(_lowerCAmelCase , """word""" ) ) , output.text )
# output times
__snake_case : Dict = torch.tensor(self.get_from_offsets(_lowerCAmelCase , """start_time""" ) )
__snake_case : Optional[Any] = torch.tensor(self.get_from_offsets(_lowerCAmelCase , """end_time""" ) )
# fmt: off
__snake_case : Optional[Any] = torch.tensor([1.4199, 1.6599, 2.2599, 3.0, 3.24, 3.5999, 3.7999, 4.0999, 4.26, 4.94, 5.28, 5.6599, 5.78, 5.94, 6.32, 6.5399, 6.6599] )
__snake_case : Optional[int] = torch.tensor([1.5399, 1.8999, 2.9, 3.16, 3.5399, 3.72, 4.0199, 4.1799, 4.76, 5.1599, 5.5599, 5.6999, 5.86, 6.1999, 6.38, 6.6199, 6.94] )
# fmt: on
self.assertTrue(torch.allclose(_lowerCAmelCase , _lowerCAmelCase , atol=0.01 ) )
self.assertTrue(torch.allclose(_lowerCAmelCase , _lowerCAmelCase , atol=0.01 ) )
| 20 | 0 |
def __lowerCAmelCase ( __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Any ):
'''simple docstring'''
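# data[] holds the combination built so far; index is its fill position, i is the next candidate in arr[]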
if index == r:
for j in range(__SCREAMING_SNAKE_CASE ):
print(data[j] , end=""" """ )
print(""" """ )
return
# When no more elements are left to put in data[]
if i >= n:
return
# current is included, put next at next location
__snake_case : List[Any] = arr[i]
combination_util(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , index + 1 , __SCREAMING_SNAKE_CASE , i + 1 )
# current is excluded, replace it with
# next (Note that i+1 is passed, but
# index is not changed)
combination_util(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , i + 1 )
# The main function that prints all combinations
# of size r in arr[] of size n. This function
# mainly uses combinationUtil()
def __lowerCAmelCase ( __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Optional[Any] ):
'''simple docstring'''
__snake_case : str = [0] * r
# Print all combination using temporary array 'data[]'
combination_util(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , 0 , __SCREAMING_SNAKE_CASE , 0 )
if __name__ == "__main__":
# Driver code to check the function above
lowercase_ = [10, 20, 30, 40, 50]
print_combination(arr, len(arr), 3)
# This code is contributed by Ambuj sahu
| 364 | import argparse
import re
import torch
from CLAP import create_model
from transformers import AutoFeatureExtractor, ClapConfig, ClapModel
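# fragments of original CLAP state-dict keys mapped to their Hugging Face counterparts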
lowercase_ = {
"text_branch": "text_model",
"audio_branch": "audio_model.audio_encoder",
"attn": "attention.self",
"self.proj": "output.dense",
"attention.self_mask": "attn_mask",
"mlp.fc1": "intermediate.dense",
"mlp.fc2": "output.dense",
"norm1": "layernorm_before",
"norm2": "layernorm_after",
"bn0": "batch_norm",
}
lowercase_ = AutoFeatureExtractor.from_pretrained("laion/clap-htsat-unfused", truncation="rand_trunc")
def __lowerCAmelCase ( __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Union[str, Any]=False ):
'''simple docstring'''
__snake_case , __snake_case : str = create_model(
"""HTSAT-tiny""" , """roberta""" , __SCREAMING_SNAKE_CASE , precision="""fp32""" , device="""cuda:0""" if torch.cuda.is_available() else """cpu""" , enable_fusion=__SCREAMING_SNAKE_CASE , fusion_type="""aff_2d""" if enable_fusion else None , )
return model, model_cfg
def __lowerCAmelCase ( __SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
__snake_case : Union[str, Any] = {}
__snake_case : List[Any] = R""".*sequential.(\d+).*"""
__snake_case : Union[str, Any] = R""".*_projection.(\d+).*"""
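# the two patterns above capture the numeric index of sequential / projection layers that need re-indexing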
for key, value in state_dict.items():
# check if any key needs to be modified
for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
if key_to_modify in key:
__snake_case : Optional[Any] = key.replace(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
if re.match(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
# replace sequential layers with list
__snake_case : Optional[Any] = re.match(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).group(1 )
__snake_case : Dict = key.replace(F'''sequential.{sequential_layer}.''' , F'''layers.{int(sequential_layer )//3}.linear.''' )
elif re.match(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
__snake_case : str = int(re.match(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).group(1 ) )
# Because in CLAP they use `nn.Sequential`...
__snake_case : List[Any] = 1 if projecton_layer == 0 else 2
__snake_case : Tuple = key.replace(F'''_projection.{projecton_layer}.''' , F'''_projection.linear{transformers_projection_layer}.''' )
if "audio" and "qkv" in key:
# split qkv into query key and value
__snake_case : Optional[int] = value
__snake_case : Any = mixed_qkv.size(0 ) // 3
__snake_case : List[Any] = mixed_qkv[:qkv_dim]
__snake_case : Tuple = mixed_qkv[qkv_dim : qkv_dim * 2]
__snake_case : List[Any] = mixed_qkv[qkv_dim * 2 :]
__snake_case : Any = query_layer
__snake_case : Dict = key_layer
__snake_case : Optional[Any] = value_layer
else:
__snake_case : List[str] = value
return model_state_dict
def __lowerCAmelCase ( __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Optional[Any]=False ):
'''simple docstring'''
__snake_case , __snake_case : List[str] = init_clap(__SCREAMING_SNAKE_CASE , enable_fusion=__SCREAMING_SNAKE_CASE )
clap_model.eval()
__snake_case : Tuple = clap_model.state_dict()
__snake_case : Union[str, Any] = rename_state_dict(__SCREAMING_SNAKE_CASE )
__snake_case : List[Any] = ClapConfig()
__snake_case : Tuple = enable_fusion
__snake_case : Any = ClapModel(__SCREAMING_SNAKE_CASE )
# ignore the spectrogram embedding layer
model.load_state_dict(__SCREAMING_SNAKE_CASE , strict=__SCREAMING_SNAKE_CASE )
model.save_pretrained(__SCREAMING_SNAKE_CASE )
transformers_config.save_pretrained(__SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument("--enable_fusion", action="store_true", help="Whether to enable fusion or not")
lowercase_ = parser.parse_args()
convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
| 20 | 0 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor
@require_tokenizers
@require_vision
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def snake_case__ ( self : List[Any] ):
__snake_case : List[str] = tempfile.mkdtemp()
# fmt: off
__snake_case : Dict = ["""[UNK]""", """[CLS]""", """[SEP]""", """[PAD]""", """[MASK]""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing""", """,""", """low""", """lowest"""]
# fmt: on
__snake_case : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
__snake_case : Union[str, Any] = {
"""do_resize""": True,
"""size""": {"""height""": 18, """width""": 18},
"""do_normalize""": True,
"""image_mean""": [0.5, 0.5, 0.5],
"""image_std""": [0.5, 0.5, 0.5],
}
__snake_case : List[Any] = os.path.join(self.tmpdirname , _lowerCAmelCase )
with open(self.image_processor_file , """w""" , encoding="""utf-8""" ) as fp:
json.dump(_lowerCAmelCase , _lowerCAmelCase )
def snake_case__ ( self : Dict , **_lowerCAmelCase : Dict ):
return BertTokenizer.from_pretrained(self.tmpdirname , **_lowerCAmelCase )
def snake_case__ ( self : Dict , **_lowerCAmelCase : List[Any] ):
return ViTImageProcessor.from_pretrained(self.tmpdirname , **_lowerCAmelCase )
def snake_case__ ( self : Union[str, Any] ):
shutil.rmtree(self.tmpdirname )
def snake_case__ ( self : Tuple ):
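# create a single random 3x30x400 uint8 image and convert it to a PIL Image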
__snake_case : int = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )]
__snake_case : int = [Image.fromarray(np.moveaxis(_lowerCAmelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def snake_case__ ( self : Any ):
__snake_case : Dict = self.get_tokenizer()
__snake_case : List[str] = self.get_image_processor()
__snake_case : Optional[int] = VisionTextDualEncoderProcessor(tokenizer=_lowerCAmelCase , image_processor=_lowerCAmelCase )
processor.save_pretrained(self.tmpdirname )
__snake_case : int = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor , _lowerCAmelCase )
def snake_case__ ( self : Tuple ):
__snake_case : Union[str, Any] = VisionTextDualEncoderProcessor(
tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
__snake_case : List[str] = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
__snake_case : Tuple = self.get_image_processor(do_normalize=_lowerCAmelCase , padding_value=1.0 )
__snake_case : Optional[int] = VisionTextDualEncoderProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=_lowerCAmelCase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _lowerCAmelCase )
def snake_case__ ( self : Optional[int] ):
__snake_case : Optional[int] = self.get_image_processor()
__snake_case : Union[str, Any] = self.get_tokenizer()
__snake_case : Union[str, Any] = VisionTextDualEncoderProcessor(tokenizer=_lowerCAmelCase , image_processor=_lowerCAmelCase )
__snake_case : List[Any] = self.prepare_image_inputs()
__snake_case : Optional[Any] = image_processor(_lowerCAmelCase , return_tensors="""np""" )
__snake_case : List[Any] = processor(images=_lowerCAmelCase , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def snake_case__ ( self : Union[str, Any] ):
__snake_case : List[str] = self.get_image_processor()
__snake_case : Tuple = self.get_tokenizer()
__snake_case : str = VisionTextDualEncoderProcessor(tokenizer=_lowerCAmelCase , image_processor=_lowerCAmelCase )
__snake_case : Dict = """lower newer"""
__snake_case : Dict = processor(text=_lowerCAmelCase )
__snake_case : int = tokenizer(_lowerCAmelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def snake_case__ ( self : Tuple ):
__snake_case : Any = self.get_image_processor()
__snake_case : Any = self.get_tokenizer()
__snake_case : List[str] = VisionTextDualEncoderProcessor(tokenizer=_lowerCAmelCase , image_processor=_lowerCAmelCase )
__snake_case : Any = """lower newer"""
__snake_case : Optional[Any] = self.prepare_image_inputs()
__snake_case : Dict = processor(text=_lowerCAmelCase , images=_lowerCAmelCase )
self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """token_type_ids""", """attention_mask""", """pixel_values"""] )
# test if it raises when no input is passed
with self.assertRaises(_lowerCAmelCase ):
processor()
def snake_case__ ( self : int ):
__snake_case : Any = self.get_image_processor()
__snake_case : Optional[Any] = self.get_tokenizer()
__snake_case : Optional[Any] = VisionTextDualEncoderProcessor(tokenizer=_lowerCAmelCase , image_processor=_lowerCAmelCase )
__snake_case : int = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
__snake_case : List[Any] = processor.batch_decode(_lowerCAmelCase )
__snake_case : Any = tokenizer.batch_decode(_lowerCAmelCase )
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
def snake_case__ ( self : Any ):
__snake_case : str = self.get_image_processor()
__snake_case : Union[str, Any] = self.get_tokenizer()
__snake_case : Union[str, Any] = VisionTextDualEncoderProcessor(tokenizer=_lowerCAmelCase , image_processor=_lowerCAmelCase )
__snake_case : str = """lower newer"""
__snake_case : int = self.prepare_image_inputs()
__snake_case : int = processor(text=_lowerCAmelCase , images=_lowerCAmelCase )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 365 | import io
import os
import unicodedata
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = "▁"
lowercase_ = {"vocab_file": "vocab.txt", "sentencepiece_model_ckpt": "sentencepiece.bpe.model"}
lowercase_ = {
"sentencepiece_model_file": "sentencepiece.bpe.model",
"vocab_file": "vocab.txt",
}
lowercase_ = {
"vocab_file": {
"ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt",
"ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt",
},
"sentencepiece_model_file": {
"ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model",
"ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model",
},
}
lowercase_ = {
"ernie-m-base": 5_14,
"ernie-m-large": 5_14,
}
lowercase_ = {
"ernie-m-base": {"do_lower_case": False},
"ernie-m-large": {"do_lower_case": False},
}
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase ):
A : List[str] = ["input_ids"]
A : Tuple = VOCAB_FILES_NAMES
A : List[Any] = PRETRAINED_INIT_CONFIGURATION
A : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
A : Optional[Any] = RESOURCE_FILES_NAMES
def __init__( self : int , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Dict=None , _lowerCAmelCase : Dict=False , _lowerCAmelCase : List[Any]="utf8" , _lowerCAmelCase : Optional[Any]="[UNK]" , _lowerCAmelCase : Optional[int]="[SEP]" , _lowerCAmelCase : List[str]="[PAD]" , _lowerCAmelCase : Dict="[CLS]" , _lowerCAmelCase : List[Any]="[MASK]" , _lowerCAmelCase : Optional[Dict[str, Any]] = None , **_lowerCAmelCase : Any , ):
# Mask token behaves like a normal word, i.e. it includes the space before it and
# is included in the raw text, there should be a match in a non-normalized sentence.
__snake_case : List[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=_lowerCAmelCase , unk_token=_lowerCAmelCase , sep_token=_lowerCAmelCase , pad_token=_lowerCAmelCase , cls_token=_lowerCAmelCase , mask_token=_lowerCAmelCase , vocab_file=_lowerCAmelCase , encoding=_lowerCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **_lowerCAmelCase , )
__snake_case : List[Any] = do_lower_case
__snake_case : Any = sentencepiece_model_ckpt
__snake_case : int = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(_lowerCAmelCase )
# mimic the behavior of paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer
if vocab_file is not None:
__snake_case : int = self.load_vocab(filepath=_lowerCAmelCase )
else:
__snake_case : Tuple = {self.sp_model.id_to_piece(_lowerCAmelCase ): id for id in range(self.sp_model.get_piece_size() )}
__snake_case : str = {v: k for k, v in self.vocab.items()}
def snake_case__ ( self : List[Any] , _lowerCAmelCase : List[Any] ):
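# map every token back to its (start, end) character span in the original text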
if text is None:
return None
__snake_case : List[Any] = self.tokenize(_lowerCAmelCase )
__snake_case , __snake_case : Optional[Any] = """""", []
for i, ch in enumerate(_lowerCAmelCase ):
if ch in self.SP_CHAR_MAPPING:
__snake_case : Any = self.SP_CHAR_MAPPING.get(_lowerCAmelCase )
else:
__snake_case : Dict = unicodedata.normalize("""NFKC""" , _lowerCAmelCase )
if self.is_whitespace(_lowerCAmelCase ):
continue
normalized_text += ch
char_mapping.extend([i] * len(_lowerCAmelCase ) )
__snake_case , __snake_case , __snake_case : str = normalized_text, [], 0
if self.do_lower_case:
__snake_case : int = text.lower()
for token in split_tokens:
if token[:1] == "▁":
__snake_case : int = token[1:]
__snake_case : Optional[int] = text[offset:].index(_lowerCAmelCase ) + offset
__snake_case : int = start + len(_lowerCAmelCase )
token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1) )
__snake_case : str = end
return token_mapping
@property
def snake_case__ ( self : Tuple ):
return len(self.vocab )
def snake_case__ ( self : Dict ):
return dict(self.vocab , **self.added_tokens_encoder )
def __getstate__( self : int ):
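# the SentencePiece processor is not picklable, so drop it here; __setstate__ reloads it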
__snake_case : str = self.__dict__.copy()
__snake_case : Union[str, Any] = None
return state
def __setstate__( self : Optional[Any] , _lowerCAmelCase : List[str] ):
__snake_case : Union[str, Any] = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
__snake_case : Optional[Any] = {}
__snake_case : Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.sentencepiece_model_ckpt )
def snake_case__ ( self : str , _lowerCAmelCase : Optional[int] ):
return "".join((self.SP_CHAR_MAPPING.get(_lowerCAmelCase , _lowerCAmelCase ) for c in text) )
def snake_case__ ( self : Optional[Any] , _lowerCAmelCase : str , _lowerCAmelCase : Tuple=False , _lowerCAmelCase : Optional[Any]=64 , _lowerCAmelCase : Optional[int]=0.1 ):
if self.sp_model_kwargs.get("""enable_sampling""" ) is True:
__snake_case : List[str] = True
if self.sp_model_kwargs.get("""alpha""" ) is not None:
__snake_case : Dict = self.sp_model_kwargs.get("""alpha""" )
if self.sp_model_kwargs.get("""nbest_size""" ) is not None:
__snake_case : List[Any] = self.sp_model_kwargs.get("""nbest_size""" )
if not enable_sampling:
__snake_case : str = self.sp_model.EncodeAsPieces(_lowerCAmelCase )
else:
__snake_case : Tuple = self.sp_model.SampleEncodeAsPieces(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
__snake_case : Union[str, Any] = []
for pi, piece in enumerate(_lowerCAmelCase ):
if piece == SPIECE_UNDERLINE:
if not pieces[pi + 1].startswith(_lowerCAmelCase ) and pi != 0:
new_pieces.append(_lowerCAmelCase )
continue
else:
continue
__snake_case : Optional[int] = 0
for i, chunk in enumerate(_lowerCAmelCase ):
if chunk == SPIECE_UNDERLINE:
continue
if self.is_ch_char(_lowerCAmelCase ) or self.is_punct(_lowerCAmelCase ):
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
new_pieces.append(_lowerCAmelCase )
__snake_case : Tuple = i + 1
elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit():
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
__snake_case : List[str] = i
elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit():
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
__snake_case : Tuple = i
if len(_lowerCAmelCase ) > lst_i:
new_pieces.append(piece[lst_i:] )
return new_pieces
def snake_case__ ( self : Union[str, Any] , _lowerCAmelCase : int ):
__snake_case : int = """""".join(_lowerCAmelCase ).replace(_lowerCAmelCase , """ """ ).strip()
return out_string
def snake_case__ ( self : Dict , _lowerCAmelCase : Tuple ):
__snake_case : int = self.convert_ids_to_tokens(_lowerCAmelCase )
__snake_case : Any = """""".join(_lowerCAmelCase ).replace(_lowerCAmelCase , """ """ ).strip()
return out_string
def snake_case__ ( self : Dict , _lowerCAmelCase : Tuple ):
return self.vocab.get(_lowerCAmelCase , self.vocab.get(self.unk_token ) )
def snake_case__ ( self : Union[str, Any] , _lowerCAmelCase : Dict ):
return self.reverse_vocab.get(_lowerCAmelCase , self.unk_token )
def snake_case__ ( self : Any , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Union[str, Any]=None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
__snake_case : Union[str, Any] = [self.cls_token_id]
__snake_case : Optional[Any] = [self.sep_token_id]
return _cls + token_ids_a + _sep + _sep + token_ids_a + _sep
def snake_case__ ( self : List[str] , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Optional[Any]=None ):
if offset_mapping_a is None:
return [(0, 0)] + offset_mapping_a + [(0, 0)]
return [(0, 0)] + offset_mapping_a + [(0, 0), (0, 0)] + offset_mapping_a + [(0, 0)]
def snake_case__ ( self : Optional[Any] , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Optional[Any]=None , _lowerCAmelCase : Optional[Any]=False ):
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
"""You should not supply a second sequence if the provided sequence of """
"""ids is already formatted with special tokens for the model.""" )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(_lowerCAmelCase )) + [1, 1] + ([0] * len(_lowerCAmelCase )) + [1]
return [1] + ([0] * len(_lowerCAmelCase )) + [1]
def snake_case__ ( self : Optional[int] , _lowerCAmelCase : List[int] , _lowerCAmelCase : Optional[List[int]] = None ):
# called when `add_special_tokens` is True, so align with `build_inputs_with_special_tokens` method
if token_ids_a is None:
# [CLS] X [SEP]
return (len(_lowerCAmelCase ) + 2) * [0]
# [CLS] A [SEP] [SEP] B [SEP]
return [0] * (len(_lowerCAmelCase ) + 1) + [1] * (len(_lowerCAmelCase ) + 3)
def snake_case__ ( self : Tuple , _lowerCAmelCase : List[str] ):
if "\u4e00" <= char <= "\u9fff":
return True
return False
def snake_case__ ( self : List[str] , _lowerCAmelCase : Any ):
if ("a" <= char <= "z") or ("A" <= char <= "Z"):
return True
return False
def snake_case__ ( self : int , _lowerCAmelCase : List[Any] ):
if char in ",;:.?!~,;:。?!《》【】":
return True
return False
def snake_case__ ( self : str , _lowerCAmelCase : Optional[Any] ):
if char == " " or char == "\t" or char == "\n" or char == "\r":
return True
if len(_lowerCAmelCase ) == 1:
__snake_case : Dict = unicodedata.category(_lowerCAmelCase )
if cat == "Zs":
return True
return False
def snake_case__ ( self : str , _lowerCAmelCase : List[Any] ):
__snake_case : Dict = {}
with io.open(_lowerCAmelCase , """r""" , encoding="""utf-8""" ) as f:
for index, line in enumerate(_lowerCAmelCase ):
__snake_case : Tuple = line.rstrip("""\n""" )
__snake_case : List[str] = int(_lowerCAmelCase )
return token_to_idx
    def snake_case__ ( self : Union[str, Any] , save_directory : str , filename_prefix : Optional[str] = None ):
        index = 0
        if os.path.isdir(save_directory ):
            vocab_file = os.path.join(
                save_directory , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
        else:
            vocab_file = (filename_prefix + """-""" if filename_prefix else """""") + save_directory
        with open(vocab_file , """w""" , encoding="""utf-8""" ) as writer:
            for token, token_index in sorted(self.vocab.items() , key=lambda kv : kv[1] ):
                if index != token_index:
                    logger.warning(
                        f'''Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.'''
                        """ Please check that the vocabulary is not corrupted!""" )
                    index = token_index
                writer.write(token + """\n""" )
                index += 1
        tokenizer_model_file = os.path.join(save_directory , """sentencepiece.bpe.model""" )
        with open(tokenizer_model_file , """wb""" ) as fi:
            content_spiece_model = self.sp_model.serialized_model_proto()
            fi.write(content_spiece_model )
        return (vocab_file,)
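# Added illustration: a minimal sketch of the pair-sequence layout the methods above
# build. The [CLS]/[SEP] ids below are assumed placeholder values, not the real vocab.
if __name__ == "__main__":
    cls_id, sep_id = 0, 2
    seq_a, seq_b = [11, 12], [21, 22]
    input_ids = [cls_id] + seq_a + [sep_id] + [sep_id] + seq_b + [sep_id]  # [CLS] A [SEP] [SEP] B [SEP]
    token_type_ids = [0] * (len(seq_a) + 1) + [1] * (len(seq_b) + 3)
    assert len(input_ids) == len(token_type_ids) == 8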
| 20 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
lowercase_ = {
"configuration_maskformer": ["MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "MaskFormerConfig"],
"configuration_maskformer_swin": ["MaskFormerSwinConfig"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ["MaskFormerFeatureExtractor"]
lowercase_ = ["MaskFormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
"MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"MaskFormerForInstanceSegmentation",
"MaskFormerModel",
"MaskFormerPreTrainedModel",
]
lowercase_ = [
"MaskFormerSwinBackbone",
"MaskFormerSwinModel",
"MaskFormerSwinPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_maskformer import MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskFormerConfig
from .configuration_maskformer_swin import MaskFormerSwinConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_maskformer import MaskFormerFeatureExtractor
from .image_processing_maskformer import MaskFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_maskformer import (
MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskFormerForInstanceSegmentation,
MaskFormerModel,
MaskFormerPreTrainedModel,
)
from .modeling_maskformer_swin import (
MaskFormerSwinBackbone,
MaskFormerSwinModel,
MaskFormerSwinPreTrainedModel,
)
else:
import sys
lowercase_ = _LazyModule(__name__, globals()["__file__"], _import_structure)
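# Added sketch: a simplified stand-in (not transformers' actual implementation) showing
# the behaviour `_LazyModule` gives the package above -- a submodule is imported only
# when one of its exported names is first accessed.
import importlib
import types


class _TinyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map every exported symbol to the submodule that defines it
        self._symbol_to_module = {sym: mod for mod, syms in import_structure.items() for sym in syms}

    def __getattr__(self, attr):
        module = importlib.import_module("." + self._symbol_to_module[attr], self.__name__)
        return getattr(module, attr)  # the heavy import happens here, on first use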
| 366 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"xlm-mlm-en-2048": "https://huggingface.co/xlm-mlm-en-2048/resolve/main/config.json",
"xlm-mlm-ende-1024": "https://huggingface.co/xlm-mlm-ende-1024/resolve/main/config.json",
"xlm-mlm-enfr-1024": "https://huggingface.co/xlm-mlm-enfr-1024/resolve/main/config.json",
"xlm-mlm-enro-1024": "https://huggingface.co/xlm-mlm-enro-1024/resolve/main/config.json",
"xlm-mlm-tlm-xnli15-1024": "https://huggingface.co/xlm-mlm-tlm-xnli15-1024/resolve/main/config.json",
"xlm-mlm-xnli15-1024": "https://huggingface.co/xlm-mlm-xnli15-1024/resolve/main/config.json",
"xlm-clm-enfr-1024": "https://huggingface.co/xlm-clm-enfr-1024/resolve/main/config.json",
"xlm-clm-ende-1024": "https://huggingface.co/xlm-clm-ende-1024/resolve/main/config.json",
"xlm-mlm-17-1280": "https://huggingface.co/xlm-mlm-17-1280/resolve/main/config.json",
"xlm-mlm-100-1280": "https://huggingface.co/xlm-mlm-100-1280/resolve/main/config.json",
}
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase ):
A : List[str] = "xlm"
A : List[str] = {
"hidden_size": "emb_dim",
"num_attention_heads": "n_heads",
"num_hidden_layers": "n_layers",
"n_words": "vocab_size", # For backward compatibility
}
    def __init__( self : List[Any] , vocab_size : Optional[Any]=3_01_45 , emb_dim : Optional[Any]=20_48 , n_layers : Dict=12 , n_heads : int=16 , dropout : Tuple=0.1 , attention_dropout : List[Any]=0.1 , gelu_activation : str=True , sinusoidal_embeddings : List[str]=False , causal : Tuple=False , asm : Dict=False , n_langs : Dict=1 , use_lang_emb : Tuple=True , max_position_embeddings : Optional[Any]=5_12 , embed_init_std : List[Any]=20_48**-0.5 , layer_norm_eps : List[str]=1e-12 , init_std : List[Any]=0.02 , bos_index : List[str]=0 , eos_index : Optional[Any]=1 , pad_index : Dict=2 , unk_index : List[str]=3 , mask_index : Tuple=5 , is_encoder : Optional[int]=True , summary_type : Tuple="first" , summary_use_proj : List[Any]=True , summary_activation : Dict=None , summary_proj_to_labels : List[Any]=True , summary_first_dropout : Any=0.1 , start_n_top : Tuple=5 , end_n_top : List[str]=5 , mask_token_id : Optional[Any]=0 , lang_id : Tuple=0 , pad_token_id : Union[str, Any]=2 , bos_token_id : Union[str, Any]=0 , **kwargs : Tuple , ):
        self.vocab_size = vocab_size
        self.emb_dim = emb_dim
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.use_lang_emb = use_lang_emb
        self.layer_norm_eps = layer_norm_eps
        self.bos_index = bos_index
        self.eos_index = eos_index
        self.pad_index = pad_index
        self.unk_index = unk_index
        self.mask_index = mask_index
        self.is_encoder = is_encoder
        self.max_position_embeddings = max_position_embeddings
        self.embed_init_std = embed_init_std
        self.init_std = init_std
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_proj_to_labels = summary_proj_to_labels
        self.summary_first_dropout = summary_first_dropout
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top
        self.mask_token_id = mask_token_id
        self.lang_id = lang_id

        if "n_words" in kwargs:
            self.n_words = kwargs["""n_words"""]

        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , **kwargs )
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase ):
@property
    def snake_case__ ( self : Dict ):
        if self.task == "multiple-choice":
            dynamic_axis = {0: """batch""", 1: """choice""", 2: """sequence"""}
        else:
            dynamic_axis = {0: """batch""", 1: """sequence"""}
        return OrderedDict(
            [
                ("""input_ids""", dynamic_axis),
                ("""attention_mask""", dynamic_axis),
                ("""token_type_ids""", dynamic_axis),
            ] )
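# Added usage sketch (assumes the upstream class name XLMConfig for the config above):
# the attribute map aliases the canonical config names onto XLM's own field names.
def _demo_attribute_map():
    from transformers import XLMConfig

    config = XLMConfig(emb_dim=10_24, n_heads=8)
    assert config.hidden_size == config.emb_dim == 10_24
    assert config.num_attention_heads == config.n_heads == 8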
| 20 | 0 |
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
lowercase_ = logging.getLogger(__name__)
def simple_accuracy(preds , labels ):
    '''simple docstring'''
    return (preds == labels).mean()
@dataclass
class ModelArguments:
A : str = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
A : Optional[str] = field(
default=__UpperCamelCase , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
A : Optional[str] = field(
default=__UpperCamelCase , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
A : Optional[str] = field(
default=__UpperCamelCase , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
@dataclass
class DataTrainingArguments:
A : str = field(metadata={"help": "The name of the task to train on: " + ", ".join(processors.keys() )} )
A : str = field(metadata={"help": "Should contain the data files for the task."} )
A : int = field(
default=128 , metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
A : bool = field(
default=__UpperCamelCase , metadata={"help": "Overwrite the cached training and evaluation sets"} )
def main():
    '''simple docstring'''
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    model_args , data_args , training_args = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F'''Output directory ({training_args.output_dir}) already exists and is not empty. Use'''
""" --overwrite_output_dir to overcome.""" )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
    logger.warning(
        """Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s""" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fp16 , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("""Training/evaluation parameters %s""" , __SCREAMING_SNAKE_CASE )
# Set seed
set_seed(training_args.seed )
    try:
        processor = processors[data_args.task_name]()
        label_list = processor.get_labels()
        num_labels = len(label_list )
    except KeyError:
        raise ValueError("""Task not found: %s""" % (data_args.task_name) )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=num_labels , finetuning_task=data_args.task_name , cache_dir=model_args.cache_dir , )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , )
# Get datasets
    train_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir , tokenizer=tokenizer , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir , tokenizer=tokenizer , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
        if training_args.do_eval
        else None
    )
    def compute_metrics(p: EvalPrediction ) -> Dict:
        preds = np.argmax(p.predictions , axis=1 )
        return {"acc": simple_accuracy(preds , p.label_ids )}
    # Data collator
    data_collator = DataCollatorWithPadding(tokenizer , pad_to_multiple_of=8 ) if training_args.fp16 else None
    # Initialize our Trainer
    trainer = Trainer(
        model=model , args=training_args , train_dataset=train_dataset , eval_dataset=eval_dataset , compute_metrics=compute_metrics , data_collator=data_collator , )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("""*** Evaluate ***""" )
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir , """eval_results.txt""" )
        if trainer.is_world_master():
            with open(output_eval_file , """w""" ) as writer:
                logger.info("""***** Eval results *****""" )
                for key, value in result.items():
                    logger.info(""" %s = %s""" , key , value )
                    writer.write("""%s = %s\n""" % (key, value) )
                results.update(result )
    return results
def _mp_fn(index ):
    '''simple docstring'''
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
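# Added sketch of how `HfArgumentParser` above splits one flat argv into the three
# dataclasses; the CLI values are illustrative only.
def _demo_arg_parsing():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses(
        args=[
            "--model_name_or_path", "bert-base-uncased",
            "--task_name", "swag",
            "--data_dir", "./data",
            "--output_dir", "./out",
        ]
    )
    assert model_args.model_name_or_path == "bert-base-uncased"
    assert data_args.task_name == "swag"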
| 367 |
import math
from typing import Optional
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"facebook/encodec_24khz": "https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json",
"facebook/encodec_48khz": "https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json",
}
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase ):
A : List[Any] = "encodec"
    def __init__( self : Tuple , target_bandwidths : Union[str, Any]=[1.5, 3.0, 6.0, 12.0, 24.0] , sampling_rate : Tuple=2_40_00 , audio_channels : List[Any]=1 , normalize : Optional[int]=False , chunk_length_s : Optional[Any]=None , overlap : Union[str, Any]=None , hidden_size : int=1_28 , num_filters : List[Any]=32 , num_residual_layers : Optional[Any]=1 , upsampling_ratios : Union[str, Any]=[8, 5, 4, 2] , norm_type : str="weight_norm" , kernel_size : Tuple=7 , last_kernel_size : str=7 , residual_kernel_size : Any=3 , dilation_growth_rate : int=2 , use_causal_conv : str=True , pad_mode : Dict="reflect" , compress : Tuple=2 , num_lstm_layers : List[Any]=2 , trim_right_ratio : int=1.0 , codebook_size : Optional[int]=10_24 , codebook_dim : int=None , use_conv_shortcut : List[str]=True , **kwargs : List[Any] , ):
        self.target_bandwidths = target_bandwidths
        self.sampling_rate = sampling_rate
        self.audio_channels = audio_channels
        self.normalize = normalize
        self.chunk_length_s = chunk_length_s
        self.overlap = overlap
        self.hidden_size = hidden_size
        self.num_filters = num_filters
        self.num_residual_layers = num_residual_layers
        self.upsampling_ratios = upsampling_ratios
        self.norm_type = norm_type
        self.kernel_size = kernel_size
        self.last_kernel_size = last_kernel_size
        self.residual_kernel_size = residual_kernel_size
        self.dilation_growth_rate = dilation_growth_rate
        self.use_causal_conv = use_causal_conv
        self.pad_mode = pad_mode
        self.compress = compress
        self.num_lstm_layers = num_lstm_layers
        self.trim_right_ratio = trim_right_ratio
        self.codebook_size = codebook_size
        self.codebook_dim = codebook_dim if codebook_dim is not None else hidden_size
        self.use_conv_shortcut = use_conv_shortcut
        if self.norm_type not in ["weight_norm", "time_group_norm"]:
            raise ValueError(
                f'''self.norm_type must be one of `"weight_norm"`, `"time_group_norm"`), got {self.norm_type}''' )
        super().__init__(**kwargs )
@property
def snake_case__ ( self : int ):
if self.chunk_length_s is None:
return None
else:
return int(self.chunk_length_s * self.sampling_rate )
@property
def snake_case__ ( self : int ):
if self.chunk_length_s is None or self.overlap is None:
return None
else:
return max(1 , int((1.0 - self.overlap) * self.chunk_length ) )
@property
    def snake_case__ ( self : Union[str, Any] ):
        hop_length = np.prod(self.upsampling_ratios )
        return math.ceil(self.sampling_rate / hop_length )
@property
def snake_case__ ( self : Tuple ):
return int(10_00 * self.target_bandwidths[-1] // (self.frame_rate * 10) )
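# Added worked example: the derived properties above, evaluated with the default
# 24 kHz values (prod(upsampling_ratios) is the decoder hop length).
if __name__ == "__main__":
    hop_length = 8 * 5 * 4 * 2                                 # prod([8, 5, 4, 2]) = 320
    frame_rate = math.ceil(2_40_00 / hop_length)               # 75 frames per second
    num_quantizers = int(10_00 * 24.0 // (frame_rate * 10))    # 32 codebooks at 24 kbps
    assert (hop_length, frame_rate, num_quantizers) == (3_20, 75, 32)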
| 20 | 0 |
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, ByTaTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
lowercase_ = "pt"
elif is_tf_available():
lowercase_ = "tf"
else:
lowercase_ = "jax"
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase , unittest.TestCase ):
A : int = ByTaTokenizer
A : Tuple = False
def snake_case__ ( self : List[Any] ):
super().setUp()
        tokenizer = ByTaTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
    def ta_base_tokenizer( self : Tuple ):
        return ByTaTokenizer.from_pretrained("""google/byt5-small""" )
def snake_case__ ( self : List[Any] , **_lowerCAmelCase : int ):
return self.tokenizer_class.from_pretrained(self.tmpdirname , **_lowerCAmelCase )
def snake_case__ ( self : Tuple , _lowerCAmelCase : Tuple , _lowerCAmelCase : Dict=False , _lowerCAmelCase : Union[str, Any]=20 , _lowerCAmelCase : int=5 ):
# XXX The default common tokenizer tests assume that every ID is decodable on its own.
# This assumption is invalid for ByT5 because single bytes might not be
# valid utf-8 (byte 128 for instance).
# Here we're overriding the smallest possible method to provide
# a clean sequence without making the same assumption.
__snake_case : Union[str, Any] = []
for i in range(len(_lowerCAmelCase ) ):
try:
__snake_case : Dict = tokenizer.decode([i] , clean_up_tokenization_spaces=_lowerCAmelCase )
except UnicodeDecodeError:
pass
toks.append((i, tok) )
__snake_case : Optional[int] = list(filter(lambda _lowerCAmelCase : re.match(r"""^[ a-zA-Z]+$""" , t[1] ) , _lowerCAmelCase ) )
__snake_case : Union[str, Any] = list(filter(lambda _lowerCAmelCase : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=_lowerCAmelCase ) , _lowerCAmelCase ) )
if max_length is not None and len(_lowerCAmelCase ) > max_length:
__snake_case : Any = toks[:max_length]
if min_length is not None and len(_lowerCAmelCase ) < min_length and len(_lowerCAmelCase ) > 0:
while len(_lowerCAmelCase ) < min_length:
__snake_case : Optional[int] = toks + toks
# toks_str = [t[1] for t in toks]
__snake_case : int = [t[0] for t in toks]
# Ensure consistency
__snake_case : Dict = tokenizer.decode(_lowerCAmelCase , clean_up_tokenization_spaces=_lowerCAmelCase )
if " " not in output_txt and len(_lowerCAmelCase ) > 1:
__snake_case : Optional[Any] = (
tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=_lowerCAmelCase )
+ """ """
+ tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=_lowerCAmelCase )
)
if with_prefix_space:
__snake_case : List[str] = """ """ + output_txt
__snake_case : str = tokenizer.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase )
return output_txt, output_ids
def snake_case__ ( self : List[Any] ):
__snake_case : Optional[Any] = self.ta_base_tokenizer
__snake_case : Optional[Any] = tokenizer(["""hi</s>""", """I went to the gym</s>""", """</s>"""] )
__snake_case : Tuple = tokenizer(["""hi""", """I went to the gym""", """"""] )
self.assertListEqual(batch_with_eos_added["""input_ids"""] , batch_without_eos_added["""input_ids"""] )
def snake_case__ ( self : Dict ):
__snake_case : Dict = self.ta_base_tokenizer
__snake_case : Any = """Unicode €."""
__snake_case : Dict = tokenizer(_lowerCAmelCase )
__snake_case : Optional[Any] = [88, 1_13, 1_08, 1_02, 1_14, 1_03, 1_04, 35, 2_29, 1_33, 1_75, 49, 1]
self.assertEqual(encoded["""input_ids"""] , _lowerCAmelCase )
# decoding
__snake_case : List[Any] = tokenizer.decode(_lowerCAmelCase )
self.assertEqual(_lowerCAmelCase , """Unicode €.</s>""" )
__snake_case : List[Any] = tokenizer("""e è é ê ë""" )
__snake_case : Any = [1_04, 35, 1_98, 1_71, 35, 1_98, 1_72, 35, 1_98, 1_73, 35, 1_98, 1_74, 1]
self.assertEqual(encoded["""input_ids"""] , _lowerCAmelCase )
# decoding
__snake_case : Dict = tokenizer.decode(_lowerCAmelCase )
self.assertEqual(_lowerCAmelCase , """e è é ê ë</s>""" )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode("""e è é ê ë""" ) ) , """e è é ê ë</s>""" )
def snake_case__ ( self : Tuple ):
__snake_case : List[str] = self.ta_base_tokenizer
__snake_case : Dict = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
# fmt: off
__snake_case : Optional[int] = [68, 35, 1_11, 1_14, 1_13, 1_06, 35, 1_15, 1_00, 1_17, 1_00, 1_06, 1_17, 1_00, 1_15, 1_07, 35, 1_05, 1_14, 1_17, 35, 1_18, 1_20, 1_12, 1_12, 1_00, 1_17, 1_08, 1_25, 1_00, 1_19, 1_08, 1_14, 1_13, 49, 1, 0]
# fmt: on
__snake_case : int = tokenizer(_lowerCAmelCase , padding=_lowerCAmelCase , return_tensors=_lowerCAmelCase )
self.assertIsInstance(_lowerCAmelCase , _lowerCAmelCase )
if FRAMEWORK != "jax":
__snake_case : Optional[int] = list(batch.input_ids.numpy()[0] )
else:
__snake_case : Union[str, Any] = list(batch.input_ids.tolist()[0] )
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
self.assertEqual((2, 37) , batch.input_ids.shape )
self.assertEqual((2, 37) , batch.attention_mask.shape )
def snake_case__ ( self : Union[str, Any] ):
__snake_case : Optional[int] = self.ta_base_tokenizer
__snake_case : Dict = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
__snake_case : Union[str, Any] = tokenizer(_lowerCAmelCase , padding=_lowerCAmelCase , return_tensors=_lowerCAmelCase )
# check if input_ids are returned and no decoder_input_ids
self.assertIn("""input_ids""" , _lowerCAmelCase )
self.assertIn("""attention_mask""" , _lowerCAmelCase )
self.assertNotIn("""decoder_input_ids""" , _lowerCAmelCase )
self.assertNotIn("""decoder_attention_mask""" , _lowerCAmelCase )
def snake_case__ ( self : Any ):
__snake_case : Optional[int] = self.ta_base_tokenizer
__snake_case : int = [
"""Summary of the text.""",
"""Another summary.""",
]
__snake_case : Optional[Any] = tokenizer(
text_target=_lowerCAmelCase , max_length=32 , padding="""max_length""" , truncation=_lowerCAmelCase , return_tensors=_lowerCAmelCase )
self.assertEqual(32 , targets["""input_ids"""].shape[1] )
def snake_case__ ( self : Optional[int] ):
__snake_case : List[str] = self.ta_base_tokenizer
__snake_case : Optional[Any] = ["""A long paragraph for summarization. </s>"""]
__snake_case : Dict = ["""Summary of the text. </s>"""]
# fmt: off
__snake_case : List[str] = [68, 35, 1_11, 1_14, 1_13, 1_06, 35, 1_15, 1_00, 1_17, 1_00, 1_06, 1_17, 1_00, 1_15, 1_07, 35, 1_05, 1_14, 1_17, 35, 1_18, 1_20, 1_12, 1_12, 1_00, 1_17, 1_08, 1_25, 1_00, 1_19, 1_08, 1_14, 1_13, 49, 35, 1]
__snake_case : str = [86, 1_20, 1_12, 1_12, 1_00, 1_17, 1_24, 35, 1_14, 1_05, 35, 1_19, 1_07, 1_04, 35, 1_19, 1_04, 1_23, 1_19, 49, 35, 1]
# fmt: on
__snake_case : List[Any] = tokenizer(_lowerCAmelCase , text_target=_lowerCAmelCase )
self.assertEqual(_lowerCAmelCase , batch["""input_ids"""][0] )
self.assertEqual(_lowerCAmelCase , batch["""labels"""][0] )
def snake_case__ ( self : Optional[int] ):
# safety check on max_len default value so we are sure the test works
__snake_case : Tuple = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
__snake_case : Optional[int] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
__snake_case : Optional[Any] = tempfile.mkdtemp()
__snake_case : int = """ He is very happy, UNwant\u00E9d,running"""
__snake_case : List[str] = tokenizer.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase )
tokenizer.save_pretrained(_lowerCAmelCase )
__snake_case : Optional[int] = tokenizer.__class__.from_pretrained(_lowerCAmelCase )
__snake_case : Optional[Any] = after_tokenizer.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase )
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
shutil.rmtree(_lowerCAmelCase )
__snake_case : Any = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
__snake_case : Tuple = tempfile.mkdtemp()
__snake_case : Dict = """ He is very happy, UNwant\u00E9d,running"""
tokenizer.add_tokens(["""bim""", """bambam"""] )
__snake_case : Any = tokenizer.additional_special_tokens
additional_special_tokens.append("""new_additional_special_token""" )
tokenizer.add_special_tokens({"""additional_special_tokens""": additional_special_tokens} )
__snake_case : str = tokenizer.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase )
tokenizer.save_pretrained(_lowerCAmelCase )
__snake_case : Union[str, Any] = tokenizer.__class__.from_pretrained(_lowerCAmelCase )
__snake_case : Optional[int] = after_tokenizer.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase )
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
self.assertIn("""new_additional_special_token""" , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
__snake_case : Any = tokenizer.__class__.from_pretrained(_lowerCAmelCase , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(_lowerCAmelCase )
def snake_case__ ( self : str ):
__snake_case : Tuple = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(_lowerCAmelCase )
with open(os.path.join(_lowerCAmelCase , """special_tokens_map.json""" ) , encoding="""utf-8""" ) as json_file:
__snake_case : Optional[int] = json.load(_lowerCAmelCase )
with open(os.path.join(_lowerCAmelCase , """tokenizer_config.json""" ) , encoding="""utf-8""" ) as json_file:
__snake_case : str = json.load(_lowerCAmelCase )
__snake_case : Union[str, Any] = [f'''<extra_id_{i}>''' for i in range(1_25 )]
__snake_case : str = added_tokens_extra_ids + [
"""an_additional_special_token"""
]
__snake_case : Dict = added_tokens_extra_ids + [
"""an_additional_special_token"""
]
with open(os.path.join(_lowerCAmelCase , """special_tokens_map.json""" ) , """w""" , encoding="""utf-8""" ) as outfile:
json.dump(_lowerCAmelCase , _lowerCAmelCase )
with open(os.path.join(_lowerCAmelCase , """tokenizer_config.json""" ) , """w""" , encoding="""utf-8""" ) as outfile:
json.dump(_lowerCAmelCase , _lowerCAmelCase )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
__snake_case : Any = tokenizer_class.from_pretrained(
_lowerCAmelCase , )
self.assertIn(
"""an_additional_special_token""" , tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
["""an_additional_special_token"""] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(["""an_additional_special_token"""] ) ) , )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
__snake_case : Optional[Any] = added_tokens_extra_ids + [AddedToken("""a_new_additional_special_token""" , lstrip=_lowerCAmelCase )]
__snake_case : Optional[int] = tokenizer_class.from_pretrained(
_lowerCAmelCase , additional_special_tokens=_lowerCAmelCase , )
self.assertIn("""a_new_additional_special_token""" , tokenizer.additional_special_tokens )
self.assertEqual(
["""a_new_additional_special_token"""] , tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(["""a_new_additional_special_token"""] ) ) , )
def snake_case__ ( self : Optional[Any] ):
__snake_case : int = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(_lowerCAmelCase )
__snake_case : Optional[Any] = tokenizer_class.from_pretrained(_lowerCAmelCase )
self.assertTrue(tokenizer.decode([2_55] ) == """""" )
def snake_case__ ( self : List[Any] ):
pass
def snake_case__ ( self : Optional[int] ):
pass
def snake_case__ ( self : Dict ):
pass
def snake_case__ ( self : Union[str, Any] ):
pass
def snake_case__ ( self : Tuple ):
# The default common tokenizer tests uses invalid tokens for ByT5 that can only accept one-character strings
# and special added tokens as tokens
__snake_case : int = self.get_tokenizers(fast=_lowerCAmelCase , do_lower_case=_lowerCAmelCase )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
__snake_case : Optional[int] = ["""t""", """h""", """i""", """s""", """ """, """i""", """s""", """ """, """a""", """ """, """t""", """e""", """x""", """t""", """</s>"""]
__snake_case : str = tokenizer.convert_tokens_to_string(_lowerCAmelCase )
self.assertIsInstance(_lowerCAmelCase , _lowerCAmelCase )
def snake_case__ ( self : int ):
__snake_case : Any = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
__snake_case : int = [
"""bos_token""",
"""eos_token""",
"""unk_token""",
"""sep_token""",
"""pad_token""",
"""cls_token""",
"""mask_token""",
]
__snake_case : Optional[int] = 0
__snake_case : str = tokenizer.convert_ids_to_tokens(
_lowerCAmelCase , skip_special_tokens=_lowerCAmelCase )
for attr in attributes_list:
setattr(_lowerCAmelCase , attr + """_id""" , _lowerCAmelCase )
self.assertEqual(getattr(_lowerCAmelCase , _lowerCAmelCase ) , _lowerCAmelCase )
self.assertEqual(getattr(_lowerCAmelCase , attr + """_id""" ) , _lowerCAmelCase )
setattr(_lowerCAmelCase , attr + """_id""" , _lowerCAmelCase )
self.assertEqual(getattr(_lowerCAmelCase , _lowerCAmelCase ) , _lowerCAmelCase )
self.assertEqual(getattr(_lowerCAmelCase , attr + """_id""" ) , _lowerCAmelCase )
setattr(_lowerCAmelCase , """additional_special_tokens_ids""" , [] )
self.assertListEqual(getattr(_lowerCAmelCase , """additional_special_tokens""" ) , [] )
self.assertListEqual(getattr(_lowerCAmelCase , """additional_special_tokens_ids""" ) , [] )
setattr(_lowerCAmelCase , """additional_special_tokens_ids""" , [token_id_to_test_setters] )
self.assertListEqual(getattr(_lowerCAmelCase , """additional_special_tokens""" ) , [token_to_test_setters] )
self.assertListEqual(getattr(_lowerCAmelCase , """additional_special_tokens_ids""" ) , [token_id_to_test_setters] )
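# Added sketch of the byte-level id scheme the fixtures above imply: each UTF-8 byte
# maps to byte_value + 3 (ids 0/1/2 are reserved for pad/</s>/unk), and "</s>" is id 1.
def _byta_ids(text: str) -> list:
    return [b + 3 for b in text.encode("utf-8")] + [1]  # trailing </s>


assert _byta_ids("Unicode €.")[:3] == [88, 1_13, 1_08]  # matches the expected ids above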
| 368 |
from __future__ import annotations
def merge(input_list: list , low: int , mid: int , high: int ) -> list:
    '''simple docstring'''
    result = []
    left , right = input_list[low:mid], input_list[mid : high + 1]
    while left and right:
        result.append((left if left[0] <= right[0] else right).pop(0 ) )
    input_list[low : high + 1] = result + left + right
    return input_list


def iter_merge_sort(input_list: list ) -> list:
    '''simple docstring'''
    if len(input_list ) <= 1:
        return input_list
    input_list = list(input_list )
    # iteration for two-way merging
    p = 2
    while p <= len(input_list ):
        # getting low, high and middle value for merge-sort of single list
        for i in range(0 , len(input_list ) , p ):
            low = i
            high = i + p - 1
            mid = (low + high + 1) // 2
            input_list = merge(input_list , low , mid , high )
        # final merge of last two parts
        if p * 2 >= len(input_list ):
            mid = i
            input_list = merge(input_list , 0 , mid , len(input_list ) - 1 )
            break
        p *= 2
    return input_list
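# Added sanity check: the merge width p doubles 2 -> 4 -> 8 over a seven-element list,
# and the final partial merge stitches the leftover tail onto the sorted prefix.
assert iter_merge_sort([5, 9, 8, 7, 1, 2, 7]) == [1, 2, 5, 7, 7, 8, 9]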
if __name__ == "__main__":
lowercase_ = input("Enter numbers separated by a comma:\n").strip()
if user_input == "":
lowercase_ = []
else:
lowercase_ = [int(item.strip()) for item in user_input.split(",")]
print(iter_merge_sort(unsorted))
| 20 | 0 |
import inspect
import os
import torch
from transformers import AutoModel
from transformers.testing_utils import mockenv_context
from transformers.trainer_utils import set_seed
import accelerate
from accelerate.accelerator import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils.testing import (
AccelerateTestCase,
TempDirTestCase,
execute_subprocess_async,
require_cuda,
require_fsdp,
require_multi_gpu,
slow,
)
from accelerate.utils.constants import (
FSDP_AUTO_WRAP_POLICY,
FSDP_BACKWARD_PREFETCH,
FSDP_SHARDING_STRATEGY,
FSDP_STATE_DICT_TYPE,
)
from accelerate.utils.dataclasses import FullyShardedDataParallelPlugin
from accelerate.utils.other import patch_environment
set_seed(42)
lowercase_ = "bert-base-cased"
lowercase_ = "fp16"
lowercase_ = "bf16"
lowercase_ = [FPaa, BFaa]
@require_fsdp
@require_cuda
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase ):
def snake_case__ ( self : Tuple ):
super().setUp()
        self.dist_env = dict(
            ACCELERATE_USE_FSDP="""true""" , MASTER_ADDR="""localhost""" , MASTER_PORT="""10999""" , RANK="""0""" , LOCAL_RANK="""0""" , WORLD_SIZE="""1""" , )
def snake_case__ ( self : Optional[int] ):
from torch.distributed.fsdp.fully_sharded_data_parallel import ShardingStrategy
for i, strategy in enumerate(_lowerCAmelCase ):
__snake_case : str = self.dist_env.copy()
__snake_case : int = f'''{i + 1}'''
__snake_case : List[str] = strategy
with mockenv_context(**_lowerCAmelCase ):
__snake_case : Any = FullyShardedDataParallelPlugin()
self.assertEqual(fsdp_plugin.sharding_strategy , ShardingStrategy(i + 1 ) )
def snake_case__ ( self : Union[str, Any] ):
from torch.distributed.fsdp.fully_sharded_data_parallel import BackwardPrefetch
for i, prefetch_policy in enumerate(_lowerCAmelCase ):
__snake_case : int = self.dist_env.copy()
__snake_case : int = prefetch_policy
with mockenv_context(**_lowerCAmelCase ):
__snake_case : Union[str, Any] = FullyShardedDataParallelPlugin()
if prefetch_policy == "NO_PREFETCH":
self.assertIsNone(fsdp_plugin.backward_prefetch )
else:
self.assertEqual(fsdp_plugin.backward_prefetch , BackwardPrefetch(i + 1 ) )
def snake_case__ ( self : str ):
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
for i, state_dict_type in enumerate(_lowerCAmelCase ):
__snake_case : Union[str, Any] = self.dist_env.copy()
__snake_case : Any = state_dict_type
with mockenv_context(**_lowerCAmelCase ):
__snake_case : List[Any] = FullyShardedDataParallelPlugin()
self.assertEqual(fsdp_plugin.state_dict_type , StateDictType(i + 1 ) )
if state_dict_type == "FULL_STATE_DICT":
self.assertTrue(fsdp_plugin.state_dict_config.offload_to_cpu )
self.assertTrue(fsdp_plugin.state_dict_config.ranka_only )
def snake_case__ ( self : List[Any] ):
__snake_case : Optional[Any] = AutoModel.from_pretrained(_lowerCAmelCase )
for policy in FSDP_AUTO_WRAP_POLICY:
__snake_case : str = self.dist_env.copy()
__snake_case : List[str] = policy
if policy == "TRANSFORMER_BASED_WRAP":
__snake_case : List[str] = """BertLayer"""
elif policy == "SIZE_BASED_WRAP":
__snake_case : str = """2000"""
with mockenv_context(**_lowerCAmelCase ):
__snake_case : List[Any] = FullyShardedDataParallelPlugin()
fsdp_plugin.set_auto_wrap_policy(_lowerCAmelCase )
if policy == "NO_WRAP":
self.assertIsNone(fsdp_plugin.auto_wrap_policy )
else:
self.assertIsNotNone(fsdp_plugin.auto_wrap_policy )
__snake_case : int = self.dist_env.copy()
__snake_case : Any = """TRANSFORMER_BASED_WRAP"""
__snake_case : Optional[Any] = """T5Layer"""
with mockenv_context(**_lowerCAmelCase ):
__snake_case : str = FullyShardedDataParallelPlugin()
with self.assertRaises(_lowerCAmelCase ) as cm:
fsdp_plugin.set_auto_wrap_policy(_lowerCAmelCase )
self.assertTrue("""Could not find the transformer layer class to wrap in the model.""" in str(cm.exception ) )
__snake_case : Any = self.dist_env.copy()
__snake_case : str = """SIZE_BASED_WRAP"""
__snake_case : Optional[int] = """0"""
with mockenv_context(**_lowerCAmelCase ):
__snake_case : Tuple = FullyShardedDataParallelPlugin()
fsdp_plugin.set_auto_wrap_policy(_lowerCAmelCase )
self.assertIsNone(fsdp_plugin.auto_wrap_policy )
def snake_case__ ( self : Union[str, Any] ):
from torch.distributed.fsdp.fully_sharded_data_parallel import MixedPrecision
from torch.distributed.fsdp.sharded_grad_scaler import ShardedGradScaler
for mp_dtype in dtypes:
__snake_case : int = self.dist_env.copy()
__snake_case : Any = mp_dtype
with mockenv_context(**_lowerCAmelCase ):
__snake_case : Optional[Any] = Accelerator()
if mp_dtype == "fp16":
__snake_case : Tuple = torch.floataa
elif mp_dtype == "bf16":
__snake_case : Tuple = torch.bfloataa
__snake_case : List[str] = MixedPrecision(param_dtype=_lowerCAmelCase , reduce_dtype=_lowerCAmelCase , buffer_dtype=_lowerCAmelCase )
self.assertEqual(accelerator.state.fsdp_plugin.mixed_precision_policy , _lowerCAmelCase )
if mp_dtype == FPaa:
self.assertTrue(isinstance(accelerator.scaler , _lowerCAmelCase ) )
elif mp_dtype == BFaa:
self.assertIsNone(accelerator.scaler )
AcceleratorState._reset_state(_lowerCAmelCase )
def snake_case__ ( self : Optional[int] ):
from torch.distributed.fsdp.fully_sharded_data_parallel import CPUOffload
for flag in [True, False]:
__snake_case : List[Any] = self.dist_env.copy()
__snake_case : Tuple = str(_lowerCAmelCase ).lower()
with mockenv_context(**_lowerCAmelCase ):
__snake_case : int = FullyShardedDataParallelPlugin()
self.assertEqual(fsdp_plugin.cpu_offload , CPUOffload(offload_params=_lowerCAmelCase ) )
@require_fsdp
@require_multi_gpu
@slow
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase ):
def snake_case__ ( self : Tuple ):
super().setUp()
__snake_case : Optional[Any] = 0.82
__snake_case : List[Any] = [
"""fsdp_shard_grad_op_transformer_based_wrap""",
"""fsdp_full_shard_transformer_based_wrap""",
]
__snake_case : Dict = {
"""multi_gpu_fp16""": 32_00,
"""fsdp_shard_grad_op_transformer_based_wrap_fp16""": 20_00,
"""fsdp_full_shard_transformer_based_wrap_fp16""": 19_00,
# Disabling below test as it overwhelms the RAM memory usage
# on CI self-hosted runner leading to tests getting killed.
# "fsdp_full_shard_cpu_offload_transformer_based_wrap_fp32": 1500, # fp16 was leading to indefinite hang
}
__snake_case : List[str] = 1_60
__snake_case : Dict = 1_60
__snake_case : Dict = inspect.getfile(accelerate.test_utils )
__snake_case : List[Any] = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["""scripts""", """external_deps"""] )
def snake_case__ ( self : List[str] ):
__snake_case : List[str] = os.path.join(self.test_scripts_folder , """test_performance.py""" )
__snake_case : Union[str, Any] = ["""accelerate""", """launch""", """--num_processes=2""", """--num_machines=1""", """--machine_rank=0""", """--use_fsdp"""]
for config in self.performance_configs:
__snake_case : Tuple = cmd.copy()
for i, strategy in enumerate(_lowerCAmelCase ):
if strategy.lower() in config:
cmd_config.append(f'''--fsdp_sharding_strategy={i+1}''' )
break
if "fp32" in config:
cmd_config.append("""--mixed_precision=no""" )
else:
cmd_config.append("""--mixed_precision=fp16""" )
if "cpu_offload" in config:
cmd_config.append("""--fsdp_offload_params=True""" )
for policy in FSDP_AUTO_WRAP_POLICY:
if policy.lower() in config:
cmd_config.append(f'''--fsdp_auto_wrap_policy={policy}''' )
break
if policy == "TRANSFORMER_BASED_WRAP":
cmd_config.append("""--fsdp_transformer_layer_cls_to_wrap=BertLayer""" )
elif policy == "SIZE_BASED_WRAP":
cmd_config.append("""--fsdp_min_num_params=2000""" )
cmd_config.extend(
[
self.test_file_path,
f'''--output_dir={self.tmpdir}''',
f'''--performance_lower_bound={self.performance_lower_bound}''',
] )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(_lowerCAmelCase , env=os.environ.copy() )
def snake_case__ ( self : Optional[Any] ):
__snake_case : Any = os.path.join(self.test_scripts_folder , """test_checkpointing.py""" )
__snake_case : Dict = [
"""accelerate""",
"""launch""",
"""--num_processes=2""",
"""--num_machines=1""",
"""--machine_rank=0""",
"""--use_fsdp""",
"""--mixed_precision=fp16""",
"""--fsdp_transformer_layer_cls_to_wrap=BertLayer""",
]
for i, strategy in enumerate(_lowerCAmelCase ):
__snake_case : int = cmd.copy()
cmd_config.append(f'''--fsdp_sharding_strategy={i+1}''' )
if strategy != "FULL_SHARD":
continue
__snake_case : List[Any] = len(_lowerCAmelCase )
for state_dict_type in FSDP_STATE_DICT_TYPE:
__snake_case : List[str] = cmd_config[:state_dict_config_index]
cmd_config.append(f'''--fsdp_state_dict_type={state_dict_type}''' )
cmd_config.extend(
[
self.test_file_path,
f'''--output_dir={self.tmpdir}''',
"""--partial_train_epoch=1""",
] )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(_lowerCAmelCase , env=os.environ.copy() )
__snake_case : Optional[int] = cmd_config[:-1]
__snake_case : Union[str, Any] = os.path.join(self.tmpdir , """epoch_0""" )
cmd_config.extend(
[
f'''--resume_from_checkpoint={resume_from_checkpoint}''',
] )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(_lowerCAmelCase , env=os.environ.copy() )
def snake_case__ ( self : Any ):
__snake_case : Optional[int] = os.path.join(self.test_scripts_folder , """test_peak_memory_usage.py""" )
__snake_case : Dict = [
"""accelerate""",
"""launch""",
"""--num_processes=2""",
"""--num_machines=1""",
"""--machine_rank=0""",
]
for spec, peak_mem_upper_bound in self.peak_memory_usage_upper_bound.items():
__snake_case : Any = cmd.copy()
if "fp16" in spec:
cmd_config.extend(["""--mixed_precision=fp16"""] )
else:
cmd_config.extend(["""--mixed_precision=no"""] )
if "multi_gpu" in spec:
continue
else:
cmd_config.extend(["""--use_fsdp"""] )
for i, strategy in enumerate(_lowerCAmelCase ):
if strategy.lower() in spec:
cmd_config.append(f'''--fsdp_sharding_strategy={i+1}''' )
break
if "cpu_offload" in spec:
cmd_config.append("""--fsdp_offload_params=True""" )
for policy in FSDP_AUTO_WRAP_POLICY:
if policy.lower() in spec:
cmd_config.append(f'''--fsdp_auto_wrap_policy={policy}''' )
break
if policy == "TRANSFORMER_BASED_WRAP":
cmd_config.append("""--fsdp_transformer_layer_cls_to_wrap=BertLayer""" )
elif policy == "SIZE_BASED_WRAP":
cmd_config.append("""--fsdp_min_num_params=2000""" )
cmd_config.extend(
[
self.test_file_path,
f'''--output_dir={self.tmpdir}''',
f'''--peak_memory_upper_bound={peak_mem_upper_bound}''',
f'''--n_train={self.n_train}''',
f'''--n_val={self.n_val}''',
] )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(_lowerCAmelCase , env=os.environ.copy() )
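# Added sketch (the env variable names are assumptions based on accelerate's FSDP
# plugin; values are illustrative): the tests above configure
# FullyShardedDataParallelPlugin purely through environment variables.
def _demo_fsdp_env_plugin():
    env = dict(
        ACCELERATE_USE_FSDP="true", MASTER_ADDR="localhost", MASTER_PORT="10999",
        RANK="0", LOCAL_RANK="0", WORLD_SIZE="1",
        FSDP_SHARDING_STRATEGY="1",  # 1 == FULL_SHARD
    )
    with mockenv_context(**env):
        return FullyShardedDataParallelPlugin()  # picks the strategy up from the env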
| 369 |
import random
import sys
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.colors import ListedColormap
lowercase_ = "Usage of script: script_name <size_of_canvas:int>"
lowercase_ = [0] * 1_00 + [1] * 10
random.shuffle(choice)
def __lowerCAmelCase ( __SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
__snake_case : List[str] = [[False for i in range(__SCREAMING_SNAKE_CASE )] for j in range(__SCREAMING_SNAKE_CASE )]
return canvas
def __lowerCAmelCase ( __SCREAMING_SNAKE_CASE : list[list[bool]] ):
'''simple docstring'''
for i, row in enumerate(__SCREAMING_SNAKE_CASE ):
for j, _ in enumerate(__SCREAMING_SNAKE_CASE ):
__snake_case : int = bool(random.getrandbits(1 ) )
def __lowerCAmelCase ( __SCREAMING_SNAKE_CASE : list[list[bool]] ):
'''simple docstring'''
__snake_case : Union[str, Any] = np.array(__SCREAMING_SNAKE_CASE )
__snake_case : List[Any] = np.array(create_canvas(current_canvas.shape[0] ) )
for r, row in enumerate(__SCREAMING_SNAKE_CASE ):
for c, pt in enumerate(__SCREAMING_SNAKE_CASE ):
__snake_case : Optional[Any] = __judge_point(
__SCREAMING_SNAKE_CASE , current_canvas[r - 1 : r + 2, c - 1 : c + 2] )
__snake_case : List[str] = next_gen_canvas
del next_gen_canvas # cleaning memory as we move on.
__snake_case : list[list[bool]] = current_canvas.tolist()
return return_canvas
def __lowerCAmelCase ( __SCREAMING_SNAKE_CASE : bool , __SCREAMING_SNAKE_CASE : list[list[bool]] ):
'''simple docstring'''
__snake_case : Any = 0
__snake_case : Dict = 0
# finding dead or alive neighbours count.
for i in neighbours:
for status in i:
if status:
alive += 1
else:
dead += 1
# handling duplicate entry for focus pt.
if pt:
alive -= 1
else:
dead -= 1
# running the rules of game here.
__snake_case : str = pt
if pt:
if alive < 2:
__snake_case : Optional[Any] = False
elif alive == 2 or alive == 3:
__snake_case : Union[str, Any] = True
elif alive > 3:
__snake_case : Optional[int] = False
else:
if alive == 3:
__snake_case : List[Any] = True
return state
if __name__ == "__main__":
if len(sys.argv) != 2:
raise Exception(usage_doc)
lowercase_ = int(sys.argv[1])
# main working structure of this module.
lowercase_ = create_canvas(canvas_size)
seed(c)
lowercase_ , lowercase_ = plt.subplots()
fig.show()
lowercase_ = ListedColormap(["w", "k"])
try:
while True:
lowercase_ = run(c)
ax.matshow(c, cmap=cmap)
fig.canvas.draw()
ax.cla()
except KeyboardInterrupt:
# do nothing.
pass
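# Added restatement of the survival rule encoded in the judge function above, kept
# self-contained so it can be run directly: live cells survive with 2 or 3 live
# neighbours, dead cells are born with exactly 3.
def _next_state(alive_neighbours: int, is_alive: bool) -> bool:
    if is_alive:
        return alive_neighbours in (2, 3)
    return alive_neighbours == 3


assert _next_state(2, True) and _next_state(3, False) and not _next_state(4, True)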
| 20 | 0 |
import os
from typing import Dict, List, Union
import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs
from .tokenization_gpta import GPTaTokenizer
class SCREAMING_SNAKE_CASE__ ( tf.keras.layers.Layer ):
    def __init__( self : Tuple , vocab : Dict[str, int] , merges : List[str] , max_length : int = None , pad_token_id : int = None ):
        super().__init__()
        self.pad_token_id = pad_token_id
        self.max_length = max_length
        self.vocab = vocab
        self.merges = merges
        self.tf_tokenizer = BytePairTokenizer(vocab , merges , sequence_length=max_length )

    @classmethod
    def from_tokenizer( cls : Union[str, Any] , tokenizer : GPTaTokenizer , *args : Optional[int] , **kwargs : List[str] ):
        merges = [""" """.join(m ) for m in tokenizer.bpe_ranks.keys()]
        vocab = tokenizer.get_vocab()
        return cls(vocab , merges , *args , **kwargs )

    @classmethod
    def from_pretrained( cls : str , pretrained_model_name_or_path : Union[str, os.PathLike] , *init_inputs : Dict , **kwargs : str ):
        tokenizer = GPTaTokenizer.from_pretrained(pretrained_model_name_or_path , *init_inputs , **kwargs )
        return cls.from_tokenizer(tokenizer , *init_inputs , **kwargs )

    @classmethod
    def from_config( cls : Optional[int] , config : Tuple ):
        return cls(**config )

    def get_config( self : Optional[Any] ):
        return {
            "vocab": self.vocab,
            "merges": self.merges,
            "max_length": self.max_length,
            "pad_token_id": self.pad_token_id,
        }

    def call( self : Dict , x : str , max_length : int = None ):
        input_ids = self.tf_tokenizer(x )
        attention_mask = tf.ones_like(input_ids )
        if self.pad_token_id is not None:
            # pad the tokens up to max length
            max_length = max_length if max_length is not None else self.max_length
            if max_length is not None:
                input_ids , attention_mask = pad_model_inputs(
                    input_ids , max_seq_length=max_length , pad_value=self.pad_token_id )
        return {"attention_mask": attention_mask, "input_ids": input_ids}
| 370 |
import inspect
import warnings
from typing import Any, Dict, Optional, Union
from packaging import version
def deprecate( *args , take_from : Optional[Union[Dict, Any]] = None , standard_warn : Any=True , stacklevel : int=2 ):
    '''simple docstring'''
    from .. import __version__

    deprecated_kwargs = take_from
    values = ()
    if not isinstance(args[0] , tuple ):
        args = (args,)
    for attribute, version_name, message in args:
        if version.parse(version.parse(__version__ ).base_version ) >= version.parse(version_name ):
            raise ValueError(
                f'''The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers\''''
                f''' version {__version__} is >= {version_name}''' )
        warning = None
        if isinstance(deprecated_kwargs , dict ) and attribute in deprecated_kwargs:
            values += (deprecated_kwargs.pop(attribute ),)
            warning = f'''The `{attribute}` argument is deprecated and will be removed in version {version_name}.'''
        elif hasattr(deprecated_kwargs , attribute ):
            values += (getattr(deprecated_kwargs , attribute ),)
            warning = f'''The `{attribute}` attribute is deprecated and will be removed in version {version_name}.'''
        elif deprecated_kwargs is None:
            warning = f'''`{attribute}` is deprecated and will be removed in version {version_name}.'''
        if warning is not None:
            warning = warning + """ """ if standard_warn else """"""
            warnings.warn(warning + message , FutureWarning , stacklevel=stacklevel )
    if isinstance(deprecated_kwargs , dict ) and len(deprecated_kwargs ) > 0:
        call_frame = inspect.getouterframes(inspect.currentframe() )[1]
        filename = call_frame.filename
        line_number = call_frame.lineno
        function = call_frame.function
        key , value = next(iter(deprecated_kwargs.items() ) )
        raise TypeError(f'''{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`''' )
    if len(values ) == 0:
        return
    elif len(values ) == 1:
        return values[0]
    return values
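# Added usage sketch (the upstream diffusers name for this helper is `deprecate`; the
# kwarg names below are illustrative): pop a renamed kwarg, warn, and keep its value.
def _demo_set_resolution(resolution=None, **kwargs):
    old_size = deprecate("size", "1.0.0", "Pass `resolution` instead of `size`.", take_from=kwargs)
    return resolution if resolution is not None else old_size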
| 20 | 0 |
import random
def _partition(data: list , pivot ) -> tuple:
    '''simple docstring'''
    less, equal, greater = [], [], []
    for element in data:
        if element < pivot:
            less.append(element )
        elif element > pivot:
            greater.append(element )
        else:
            equal.append(element )
    return less, equal, greater


def quick_select(items: list , index: int ):
    '''simple docstring'''
    # index = len(items) // 2 when trying to find the median
    # (value of index when items is sorted)

    # invalid input
    if index >= len(items ) or index < 0:
        return None

    pivot = items[random.randint(0 , len(items ) - 1 )]
    count = 0
    smaller , equal , larger = _partition(items , pivot )
    count = len(equal )
    m = len(smaller )

    # index is the pivot
    if m <= index < m + count:
        return pivot
    # must be in smaller
    elif m > index:
        return quick_select(smaller , index )
    # must be in larger
    else:
        return quick_select(larger , index - (m + count) )
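# Added example: with index = len(items) // 2, quick_select returns the median of an
# odd-length list without fully sorting it.
assert quick_select([2, 4, 5, 7, 8_99, 54, 32], 3) == 7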
| 371 |
import argparse
import os
from . import (
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
AlbertConfig,
BartConfig,
BertConfig,
CamembertConfig,
CTRLConfig,
DistilBertConfig,
DPRConfig,
ElectraConfig,
FlaubertConfig,
GPTaConfig,
LayoutLMConfig,
LxmertConfig,
OpenAIGPTConfig,
RobertaConfig,
TaConfig,
TFAlbertForPreTraining,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFCamembertForMaskedLM,
TFCTRLLMHeadModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
TFElectraForPreTraining,
TFFlaubertWithLMHeadModel,
TFGPTaLMHeadModel,
TFLayoutLMForMaskedLM,
TFLxmertForPreTraining,
TFLxmertVisualFeatureEncoder,
TFOpenAIGPTLMHeadModel,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForSequenceClassification,
TFTaForConditionalGeneration,
TFTransfoXLLMHeadModel,
TFWavaVecaModel,
TFXLMRobertaForMaskedLM,
TFXLMWithLMHeadModel,
TFXLNetLMHeadModel,
TransfoXLConfig,
WavaVecaConfig,
WavaVecaModel,
XLMConfig,
XLMRobertaConfig,
XLNetConfig,
is_torch_available,
load_pytorch_checkpoint_in_tfa_model,
)
from .utils import CONFIG_NAME, WEIGHTS_NAME, cached_file, logging
if is_torch_available():
import numpy as np
import torch
from . import (
AlbertForPreTraining,
BartForConditionalGeneration,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
CamembertForMaskedLM,
CTRLLMHeadModel,
DistilBertForMaskedLM,
DistilBertForQuestionAnswering,
DPRContextEncoder,
DPRQuestionEncoder,
DPRReader,
ElectraForPreTraining,
FlaubertWithLMHeadModel,
GPTaLMHeadModel,
LayoutLMForMaskedLM,
LxmertForPreTraining,
LxmertVisualFeatureEncoder,
OpenAIGPTLMHeadModel,
RobertaForMaskedLM,
RobertaForSequenceClassification,
TaForConditionalGeneration,
TransfoXLLMHeadModel,
XLMRobertaForMaskedLM,
XLMWithLMHeadModel,
XLNetLMHeadModel,
)
logging.set_verbosity_info()
lowercase_ = {
"bart": (
BartConfig,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
BartForConditionalGeneration,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
),
"bert": (
BertConfig,
TFBertForPreTraining,
BertForPreTraining,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"bert-large-uncased-whole-word-masking-finetuned-squad": (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"bert-large-cased-whole-word-masking-finetuned-squad": (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"bert-base-cased-finetuned-mrpc": (
BertConfig,
TFBertForSequenceClassification,
BertForSequenceClassification,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"dpr": (
DPRConfig,
TFDPRQuestionEncoder,
TFDPRContextEncoder,
TFDPRReader,
DPRQuestionEncoder,
DPRContextEncoder,
DPRReader,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
),
"gpt2": (
GPTaConfig,
TFGPTaLMHeadModel,
GPTaLMHeadModel,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"xlnet": (
XLNetConfig,
TFXLNetLMHeadModel,
XLNetLMHeadModel,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"xlm": (
XLMConfig,
TFXLMWithLMHeadModel,
XLMWithLMHeadModel,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"xlm-roberta": (
XLMRobertaConfig,
TFXLMRobertaForMaskedLM,
XLMRobertaForMaskedLM,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"transfo-xl": (
TransfoXLConfig,
TFTransfoXLLMHeadModel,
TransfoXLLMHeadModel,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"openai-gpt": (
OpenAIGPTConfig,
TFOpenAIGPTLMHeadModel,
OpenAIGPTLMHeadModel,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"roberta": (
RobertaConfig,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
RobertaForMaskedLM,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"layoutlm": (
LayoutLMConfig,
TFLayoutLMForMaskedLM,
LayoutLMForMaskedLM,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
),
"roberta-large-mnli": (
RobertaConfig,
TFRobertaForSequenceClassification,
RobertaForSequenceClassification,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"camembert": (
CamembertConfig,
TFCamembertForMaskedLM,
CamembertForMaskedLM,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"flaubert": (
FlaubertConfig,
TFFlaubertWithLMHeadModel,
FlaubertWithLMHeadModel,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"distilbert": (
DistilBertConfig,
TFDistilBertForMaskedLM,
DistilBertForMaskedLM,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"distilbert-base-distilled-squad": (
DistilBertConfig,
TFDistilBertForQuestionAnswering,
DistilBertForQuestionAnswering,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"lxmert": (
LxmertConfig,
TFLxmertForPreTraining,
LxmertForPreTraining,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"lxmert-visual-feature-encoder": (
LxmertConfig,
TFLxmertVisualFeatureEncoder,
LxmertVisualFeatureEncoder,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"ctrl": (
CTRLConfig,
TFCTRLLMHeadModel,
CTRLLMHeadModel,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"albert": (
AlbertConfig,
TFAlbertForPreTraining,
AlbertForPreTraining,
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"t5": (
TaConfig,
TFTaForConditionalGeneration,
TaForConditionalGeneration,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"electra": (
ElectraConfig,
TFElectraForPreTraining,
ElectraForPreTraining,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"wav2vec2": (
WavaVecaConfig,
TFWavaVecaModel,
WavaVecaModel,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
}
def __lowerCAmelCase ( __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : List[str]=False , __SCREAMING_SNAKE_CASE : List[Any]=True ):
'''simple docstring'''
if model_type not in MODEL_CLASSES:
raise ValueError(F'''Unrecognized model type, should be one of {list(MODEL_CLASSES.keys() )}.''' )
__snake_case , __snake_case , __snake_case , __snake_case : Any = MODEL_CLASSES[model_type]
# Initialise TF model
if config_file in aws_config_map:
__snake_case : int = cached_file(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , force_download=not use_cached_models )
__snake_case : Dict = config_class.from_json_file(__SCREAMING_SNAKE_CASE )
__snake_case : Tuple = True
__snake_case : Union[str, Any] = True
print(F'''Building TensorFlow model from configuration: {config}''' )
__snake_case : List[Any] = model_class(__SCREAMING_SNAKE_CASE )
    # Load weights from the PyTorch checkpoint
if pytorch_checkpoint_path in aws_config_map.keys():
__snake_case : Optional[Any] = cached_file(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , force_download=not use_cached_models )
# Load PyTorch checkpoint in tf2 model:
__snake_case : List[Any] = load_pytorch_checkpoint_in_tfa_model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
if compare_with_pt_model:
__snake_case : Tuple = tf_model(tf_model.dummy_inputs , training=__SCREAMING_SNAKE_CASE ) # build the network
__snake_case : List[str] = torch.load(__SCREAMING_SNAKE_CASE , map_location="""cpu""" )
__snake_case : Any = pt_model_class.from_pretrained(
pretrained_model_name_or_path=__SCREAMING_SNAKE_CASE , config=__SCREAMING_SNAKE_CASE , state_dict=__SCREAMING_SNAKE_CASE )
with torch.no_grad():
__snake_case : Union[str, Any] = pt_model(**pt_model.dummy_inputs )
__snake_case : Any = pto[0].numpy()
__snake_case : Optional[int] = tfo[0].numpy()
__snake_case : Optional[int] = np.amax(np.abs(np_pt - np_tf ) )
print(F'''Max absolute difference between models outputs {diff}''' )
assert diff <= 2E-2, F'''Error, model absolute difference is >2e-2: {diff}'''
    # Save the TensorFlow model
print(F'''Save TensorFlow model to {tf_dump_path}''' )
tf_model.save_weights(__SCREAMING_SNAKE_CASE , save_format="""h5""" )
def __lowerCAmelCase ( __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : List[str]=None , __SCREAMING_SNAKE_CASE : Any=None , __SCREAMING_SNAKE_CASE : Tuple=False , __SCREAMING_SNAKE_CASE : Tuple=False , __SCREAMING_SNAKE_CASE : List[Any]=False , __SCREAMING_SNAKE_CASE : Any=False , ):
'''simple docstring'''
if args_model_type is None:
__snake_case : Tuple = list(MODEL_CLASSES.keys() )
else:
__snake_case : Union[str, Any] = [args_model_type]
for j, model_type in enumerate(__SCREAMING_SNAKE_CASE , start=1 ):
print("""=""" * 1_0_0 )
print(F''' Converting model type {j}/{len(__SCREAMING_SNAKE_CASE )}: {model_type}''' )
print("""=""" * 1_0_0 )
if model_type not in MODEL_CLASSES:
raise ValueError(F'''Unrecognized model type {model_type}, should be one of {list(MODEL_CLASSES.keys() )}.''' )
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case : Optional[int] = MODEL_CLASSES[model_type]
if model_shortcut_names_or_path is None:
__snake_case : int = list(aws_model_maps.keys() )
if config_shortcut_names_or_path is None:
__snake_case : Union[str, Any] = model_shortcut_names_or_path
for i, (model_shortcut_name, config_shortcut_name) in enumerate(
zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) , start=1 ):
print("""-""" * 1_0_0 )
if "-squad" in model_shortcut_name or "-mrpc" in model_shortcut_name or "-mnli" in model_shortcut_name:
if not only_convert_finetuned_models:
print(F''' Skipping finetuned checkpoint {model_shortcut_name}''' )
continue
__snake_case : List[Any] = model_shortcut_name
elif only_convert_finetuned_models:
print(F''' Skipping not finetuned checkpoint {model_shortcut_name}''' )
continue
print(
F''' Converting checkpoint {i}/{len(__SCREAMING_SNAKE_CASE )}: {model_shortcut_name} - model_type {model_type}''' )
print("""-""" * 1_0_0 )
if config_shortcut_name in aws_config_map:
__snake_case : int = cached_file(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , force_download=not use_cached_models )
else:
__snake_case : Dict = config_shortcut_name
if model_shortcut_name in aws_model_maps:
__snake_case : Union[str, Any] = cached_file(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , force_download=not use_cached_models )
else:
__snake_case : List[Any] = model_shortcut_name
if os.path.isfile(__SCREAMING_SNAKE_CASE ):
__snake_case : List[str] = """converted_model"""
convert_pt_checkpoint_to_tf(
model_type=__SCREAMING_SNAKE_CASE , pytorch_checkpoint_path=__SCREAMING_SNAKE_CASE , config_file=__SCREAMING_SNAKE_CASE , tf_dump_path=os.path.join(__SCREAMING_SNAKE_CASE , model_shortcut_name + """-tf_model.h5""" ) , compare_with_pt_model=__SCREAMING_SNAKE_CASE , )
if remove_cached_files:
os.remove(__SCREAMING_SNAKE_CASE )
os.remove(__SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_dump_path", default=None, type=str, required=True, help="Path to the output Tensorflow dump file."
)
parser.add_argument(
"--model_type",
default=None,
type=str,
help=(
F'''Model type selected in the list of {list(MODEL_CLASSES.keys())}. If not given, will download and '''
"convert all the models from AWS."
),
)
parser.add_argument(
"--pytorch_checkpoint_path",
default=None,
type=str,
help=(
"Path to the PyTorch checkpoint path or shortcut name to download from AWS. "
"If not given, will download and convert all the checkpoints from AWS."
),
)
parser.add_argument(
"--config_file",
default=None,
type=str,
help=(
"The config json file corresponding to the pre-trained model. \n"
"This specifies the model architecture. If not given and "
"--pytorch_checkpoint_path is not given or is a shortcut name "
"use the configuration associated to the shortcut name on the AWS"
),
)
parser.add_argument(
"--compare_with_pt_model", action="store_true", help="Compare Tensorflow and PyTorch model predictions."
)
parser.add_argument(
"--use_cached_models",
action="store_true",
help="Use cached models if possible instead of updating to latest checkpoint versions.",
)
parser.add_argument(
"--remove_cached_files",
action="store_true",
help="Remove pytorch models after conversion (save memory when converting in batches).",
)
parser.add_argument("--only_convert_finetuned_models", action="store_true", help="Only convert finetuned models.")
lowercase_ = parser.parse_args()
# if args.pytorch_checkpoint_path is not None:
# convert_pt_checkpoint_to_tf(args.model_type.lower(),
# args.pytorch_checkpoint_path,
# args.config_file if args.config_file is not None else args.pytorch_checkpoint_path,
# args.tf_dump_path,
# compare_with_pt_model=args.compare_with_pt_model,
# use_cached_models=args.use_cached_models)
# else:
convert_all_pt_checkpoints_to_tf(
args.model_type.lower() if args.model_type is not None else None,
args.tf_dump_path,
model_shortcut_names_or_path=[args.pytorch_checkpoint_path]
if args.pytorch_checkpoint_path is not None
else None,
config_shortcut_names_or_path=[args.config_file] if args.config_file is not None else None,
compare_with_pt_model=args.compare_with_pt_model,
use_cached_models=args.use_cached_models,
remove_cached_files=args.remove_cached_files,
only_convert_finetuned_models=args.only_convert_finetuned_models,
)
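# Example invocation (a sketch, not from the original file: paths and shortcut names are
# placeholders, and the file name assumes the script is saved as
# convert_pytorch_checkpoint_to_tf2.py):
#
#   python convert_pytorch_checkpoint_to_tf2.py \
#       --model_type bert \
#       --pytorch_checkpoint_path bert-base-cased \
#       --config_file bert-base-cased \
#       --tf_dump_path ./tf_dump \
#       --compare_with_pt_model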
| 20 | 0 |
import argparse
import re
import requests
import torch
# git clone https://github.com/salesforce/BLIP.git
from models.blip import blip_decoder
from models.blip_itm import blip_itm
from models.blip_vqa import blip_vqa
from PIL import Image
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode
from transformers import (
BertTokenizer,
BlipConfig,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
)
def __lowerCAmelCase ( __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : List[str] ):
'''simple docstring'''
__snake_case : Any = """https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg"""
__snake_case : str = Image.open(requests.get(__SCREAMING_SNAKE_CASE , stream=__SCREAMING_SNAKE_CASE ).raw ).convert("""RGB""" )
__snake_case : str = transforms.Compose(
[
transforms.Resize((image_size, image_size) , interpolation=InterpolationMode.BICUBIC ),
transforms.ToTensor(),
transforms.Normalize((0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73) , (0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11) ),
] )
__snake_case : Any = transform(__SCREAMING_SNAKE_CASE ).unsqueeze(0 ).to(__SCREAMING_SNAKE_CASE )
return image
def __lowerCAmelCase ( __SCREAMING_SNAKE_CASE : Optional[Any] ):
'''simple docstring'''
if "visual_encoder" in key:
__snake_case : Tuple = re.sub("""visual_encoder*""" , """vision_model.encoder""" , __SCREAMING_SNAKE_CASE )
if "blocks" in key:
__snake_case : int = re.sub(R"""blocks""" , """layers""" , __SCREAMING_SNAKE_CASE )
if "attn" in key:
__snake_case : Tuple = re.sub(R"""attn""" , """self_attn""" , __SCREAMING_SNAKE_CASE )
if "norm1" in key:
__snake_case : Tuple = re.sub(R"""norm1""" , """layer_norm1""" , __SCREAMING_SNAKE_CASE )
if "norm2" in key:
__snake_case : Tuple = re.sub(R"""norm2""" , """layer_norm2""" , __SCREAMING_SNAKE_CASE )
if "encoder.norm" in key:
__snake_case : Dict = re.sub(R"""encoder.norm""" , """post_layernorm""" , __SCREAMING_SNAKE_CASE )
if "encoder.patch_embed.proj" in key:
__snake_case : List[Any] = re.sub(R"""encoder.patch_embed.proj""" , """embeddings.patch_embedding""" , __SCREAMING_SNAKE_CASE )
if "encoder.pos_embed" in key:
__snake_case : Tuple = re.sub(R"""encoder.pos_embed""" , """embeddings.position_embedding""" , __SCREAMING_SNAKE_CASE )
if "encoder.cls_token" in key:
__snake_case : int = re.sub(R"""encoder.cls_token""" , """embeddings.class_embedding""" , __SCREAMING_SNAKE_CASE )
if "self_attn" in key:
__snake_case : List[str] = re.sub(R"""self_attn.proj""" , """self_attn.projection""" , __SCREAMING_SNAKE_CASE )
return key
@torch.no_grad()
def __lowerCAmelCase ( __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Optional[Any]=None ):
'''simple docstring'''
if config_path is not None:
__snake_case : Any = BlipConfig.from_pretrained(__SCREAMING_SNAKE_CASE )
else:
__snake_case : Tuple = BlipConfig(projection_dim=5_1_2 , text_config={} , vision_config={} )
__snake_case : Tuple = BlipForConditionalGeneration(__SCREAMING_SNAKE_CASE ).eval()
__snake_case : Tuple = """https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth"""
__snake_case : str = blip_decoder(pretrained=__SCREAMING_SNAKE_CASE , image_size=3_8_4 , vit="""base""" )
__snake_case : List[str] = pt_model.eval()
__snake_case : Tuple = pt_model.state_dict()
for key in modified_state_dict.copy():
__snake_case : List[str] = modified_state_dict.pop(__SCREAMING_SNAKE_CASE )
__snake_case : List[Any] = rename_key(__SCREAMING_SNAKE_CASE )
__snake_case : Any = value
hf_model.load_state_dict(__SCREAMING_SNAKE_CASE )
__snake_case : int = 3_8_4
__snake_case : Optional[int] = load_demo_image(image_size=__SCREAMING_SNAKE_CASE , device="""cpu""" )
__snake_case : Optional[Any] = BertTokenizer.from_pretrained("""bert-base-uncased""" )
__snake_case : Optional[int] = tokenizer(["""a picture of"""] ).input_ids
__snake_case : Any = hf_model.generate(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
assert out[0].tolist() == [3_0_5_2_2, 1_0_3_7, 3_8_6_1, 1_9_9_7, 1_0_3_7, 2_4_5_0, 3_5_6_4, 2_0_0_6, 1_9_9_6, 3_5_0_9, 2_0_0_7, 2_0_1_4, 3_8_9_9, 1_0_2]
__snake_case : Union[str, Any] = hf_model.generate(__SCREAMING_SNAKE_CASE )
assert out[0].tolist() == [3_0_5_2_2, 1_0_3_7, 2_4_5_0, 3_5_6_4, 2_0_0_6, 1_9_9_6, 3_5_0_9, 2_0_0_7, 2_0_1_4, 3_8_9_9, 1_0_2]
if pytorch_dump_folder_path is not None:
hf_model.save_pretrained(__SCREAMING_SNAKE_CASE )
# model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth'
__snake_case : List[str] = (
"""https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth"""
)
__snake_case : Dict = blip_vqa(pretrained=__SCREAMING_SNAKE_CASE , image_size=__SCREAMING_SNAKE_CASE , vit="""base""" )
vqa_model.eval()
__snake_case : int = vqa_model.state_dict()
for key in modified_state_dict.copy():
__snake_case : int = modified_state_dict.pop(__SCREAMING_SNAKE_CASE )
__snake_case : List[Any] = rename_key(__SCREAMING_SNAKE_CASE )
__snake_case : Optional[Any] = value
__snake_case : Any = BlipForQuestionAnswering(__SCREAMING_SNAKE_CASE )
hf_vqa_model.load_state_dict(__SCREAMING_SNAKE_CASE )
__snake_case : Optional[Any] = ["""How many dogs are in this image?"""]
__snake_case : List[Any] = tokenizer(__SCREAMING_SNAKE_CASE , return_tensors="""pt""" ).input_ids
__snake_case : List[str] = hf_vqa_model.generate(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
print(tokenizer.decode(answer[0] ) )
assert tokenizer.decode(answer[0] ) == "[UNK] 1 [SEP]"
if pytorch_dump_folder_path is not None:
hf_vqa_model.save_pretrained(pytorch_dump_folder_path + """_vqa""" )
__snake_case : int = """https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth"""
__snake_case : int = blip_itm(pretrained=__SCREAMING_SNAKE_CASE , image_size=__SCREAMING_SNAKE_CASE , vit="""base""" )
itm_model.eval()
__snake_case : List[Any] = itm_model.state_dict()
for key in modified_state_dict.copy():
__snake_case : Optional[Any] = modified_state_dict.pop(__SCREAMING_SNAKE_CASE )
__snake_case : List[str] = rename_key(__SCREAMING_SNAKE_CASE )
__snake_case : Union[str, Any] = value
__snake_case : Union[str, Any] = BlipForImageTextRetrieval(__SCREAMING_SNAKE_CASE )
__snake_case : Optional[Any] = ["""A picture of a woman with a dog sitting in a beach"""]
__snake_case : str = tokenizer(
__SCREAMING_SNAKE_CASE , return_tensors="""pt""" , padding="""max_length""" , truncation=__SCREAMING_SNAKE_CASE , max_length=3_5 , ).input_ids
hf_itm_model.load_state_dict(__SCREAMING_SNAKE_CASE )
hf_itm_model.eval()
__snake_case : Tuple = hf_itm_model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , use_itm_head=__SCREAMING_SNAKE_CASE )
__snake_case : Dict = hf_itm_model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , use_itm_head=__SCREAMING_SNAKE_CASE )
assert out[0].item() == 0.21_10_68_74_94_27_79_54
assert torch.nn.functional.softmax(out_itm[0] , dim=1 )[:, 1].item() == 0.4_56_98_84_53_86_50_51_27
if pytorch_dump_folder_path is not None:
hf_itm_model.save_pretrained(pytorch_dump_folder_path + """_itm""" )
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
lowercase_ = parser.parse_args()
    convert_blip_checkpoint(args.pytorch_dump_folder_path, args.config_path)
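# Example invocation (a sketch; the file name and output path are placeholders — the
# parser above only defines --pytorch_dump_folder_path and --config_path):
#
#   python convert_blip_original_pytorch_to_hf.py \
#       --pytorch_dump_folder_path ./blip-converted \
#       --config_path ./blip_config.json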
| 350 | import random
def __lowerCAmelCase ( __SCREAMING_SNAKE_CASE : list , __SCREAMING_SNAKE_CASE : Optional[int] ):
'''simple docstring'''
__snake_case , __snake_case , __snake_case : Tuple = [], [], []
for element in data:
if element < pivot:
less.append(__SCREAMING_SNAKE_CASE )
elif element > pivot:
greater.append(__SCREAMING_SNAKE_CASE )
else:
equal.append(__SCREAMING_SNAKE_CASE )
return less, equal, greater
def __lowerCAmelCase ( __SCREAMING_SNAKE_CASE : list , __SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
# index = len(items) // 2 when trying to find the median
# (value of index when items is sorted)
# invalid input
if index >= len(__SCREAMING_SNAKE_CASE ) or index < 0:
return None
__snake_case : int = items[random.randint(0 , len(__SCREAMING_SNAKE_CASE ) - 1 )]
__snake_case : Tuple = 0
__snake_case , __snake_case , __snake_case : List[str] = _partition(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
__snake_case : Optional[Any] = len(__SCREAMING_SNAKE_CASE )
__snake_case : int = len(__SCREAMING_SNAKE_CASE )
# index is the pivot
if m <= index < m + count:
return pivot
# must be in smaller
elif m > index:
return quick_select(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# must be in larger
else:
return quick_select(__SCREAMING_SNAKE_CASE , index - (m + count) )
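# Minimal usage sketch (illustrative values, not part of the original module; assumes the
# definitions above are bound to their original names `_partition` and `quick_select`,
# as the recursive call sites suggest):
#
#   items = [7, 1, 5, 3, 9]
#   quick_select(items, len(items) // 2)  # -> 5, the median
#   quick_select(items, 0)                # -> 1, the minimum
#   quick_select(items, 9)                # -> None, index out of range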
| 20 | 0 |
def __lowerCAmelCase ( __SCREAMING_SNAKE_CASE : int = 1_0_0_0_0_0_0 ):
'''simple docstring'''
__snake_case : Any = 1
__snake_case : Optional[int] = 1
__snake_case : List[Any] = {1: 1}
for inputa in range(2 , __SCREAMING_SNAKE_CASE ):
__snake_case : str = 0
__snake_case : Dict = inputa
while True:
if number in counters:
counter += counters[number]
break
if number % 2 == 0:
number //= 2
counter += 1
else:
__snake_case : Union[str, Any] = (3 * number) + 1
counter += 1
if inputa not in counters:
__snake_case : List[Any] = counter
if counter > pre_counter:
__snake_case : Tuple = inputa
__snake_case : Union[str, Any] = counter
return largest_number
if __name__ == "__main__":
print(solution(int(input().strip())))
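# Worked check (the well-known Project Euler #14 answer): under one million, the longest
# Collatz chain starts at 837_799, so solution(1_000_000) returns 837_799. A short hand
# trace for a small start: 6 -> 3 -> 10 -> 5 -> 16 -> 8 -> 4 -> 2 -> 1 (chain length 9,
# counting both endpoints, which matches how `counter` is accumulated above).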
| 351 | import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer
lowercase_ = logging.get_logger(__name__)
lowercase_ = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
lowercase_ = {
"vocab_file": {
"junnyu/roformer_chinese_small": "https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt",
"junnyu/roformer_chinese_base": "https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt",
"junnyu/roformer_chinese_char_small": (
"https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt"
),
"junnyu/roformer_chinese_char_base": (
"https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt"
),
"junnyu/roformer_small_discriminator": (
"https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt"
),
"junnyu/roformer_small_generator": (
"https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt"
),
}
}
lowercase_ = {
"junnyu/roformer_chinese_small": 15_36,
"junnyu/roformer_chinese_base": 15_36,
"junnyu/roformer_chinese_char_small": 5_12,
"junnyu/roformer_chinese_char_base": 5_12,
"junnyu/roformer_small_discriminator": 1_28,
"junnyu/roformer_small_generator": 1_28,
}
lowercase_ = {
"junnyu/roformer_chinese_small": {"do_lower_case": True},
"junnyu/roformer_chinese_base": {"do_lower_case": True},
"junnyu/roformer_chinese_char_small": {"do_lower_case": True},
"junnyu/roformer_chinese_char_base": {"do_lower_case": True},
"junnyu/roformer_small_discriminator": {"do_lower_case": True},
"junnyu/roformer_small_generator": {"do_lower_case": True},
}
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase ):
A : Optional[int] = VOCAB_FILES_NAMES
A : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
A : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A : int = PRETRAINED_INIT_CONFIGURATION
A : List[str] = RoFormerTokenizer
def __init__( self : Optional[Any] , _lowerCAmelCase : Dict=None , _lowerCAmelCase : List[Any]=None , _lowerCAmelCase : List[Any]=True , _lowerCAmelCase : Any="[UNK]" , _lowerCAmelCase : int="[SEP]" , _lowerCAmelCase : Optional[int]="[PAD]" , _lowerCAmelCase : Optional[int]="[CLS]" , _lowerCAmelCase : Optional[Any]="[MASK]" , _lowerCAmelCase : Optional[Any]=True , _lowerCAmelCase : Optional[Any]=None , **_lowerCAmelCase : Dict , ):
super().__init__(
_lowerCAmelCase , tokenizer_file=_lowerCAmelCase , do_lower_case=_lowerCAmelCase , unk_token=_lowerCAmelCase , sep_token=_lowerCAmelCase , pad_token=_lowerCAmelCase , cls_token=_lowerCAmelCase , mask_token=_lowerCAmelCase , tokenize_chinese_chars=_lowerCAmelCase , strip_accents=_lowerCAmelCase , **_lowerCAmelCase , )
__snake_case : Dict = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
pre_tok_state.get("""lowercase""" , _lowerCAmelCase ) != do_lower_case
or pre_tok_state.get("""strip_accents""" , _lowerCAmelCase ) != strip_accents
):
__snake_case : Tuple = getattr(_lowerCAmelCase , pre_tok_state.pop("""type""" ) )
__snake_case : List[Any] = do_lower_case
__snake_case : Optional[Any] = strip_accents
__snake_case : List[str] = pre_tok_class(**_lowerCAmelCase )
__snake_case : Optional[Any] = do_lower_case
def __getstate__( self : Optional[Any] ):
__snake_case : Optional[int] = self.__dict__.copy()
__snake_case : Optional[Any] = BertPreTokenizer()
return state
def __setstate__( self : str , _lowerCAmelCase : Dict ):
__snake_case : str = d
__snake_case : int = self.__dict__["""_tokenizer"""].get_vocab()
__snake_case : List[str] = PreTokenizer.custom(JiebaPreTokenizer(_lowerCAmelCase ) )
def snake_case__ ( self : Union[str, Any] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Optional[int]=None ):
__snake_case : Dict = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def snake_case__ ( self : Union[str, Any] , _lowerCAmelCase : List[int] , _lowerCAmelCase : Optional[List[int]] = None ):
__snake_case : Optional[int] = [self.sep_token_id]
__snake_case : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def snake_case__ ( self : Optional[int] , _lowerCAmelCase : str , _lowerCAmelCase : Optional[str] = None ):
__snake_case : int = self._tokenizer.model.save(_lowerCAmelCase , name=_lowerCAmelCase )
return tuple(_lowerCAmelCase )
def snake_case__ ( self : int , _lowerCAmelCase : int , _lowerCAmelCase : Union[str, Any]=None , _lowerCAmelCase : Tuple=None , _lowerCAmelCase : Union[str, Any]=False , **_lowerCAmelCase : Tuple , ):
__snake_case : Tuple = BertPreTokenizer()
return super().save_pretrained(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , **_lowerCAmelCase )
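# Usage sketch (the checkpoint is one of the Hub repos mapped above; the class is assumed
# to be exported under its original name, RoFormerTokenizerFast). Because the custom Jieba
# pre-tokenizer cannot be serialized, __getstate__ swaps in a plain BertPreTokenizer before
# pickling and __setstate__ restores the Jieba one afterwards:
#
#   import pickle
#   tokenizer = RoFormerTokenizerFast.from_pretrained("junnyu/roformer_chinese_base")
#   restored = pickle.loads(pickle.dumps(tokenizer))  # round-trips thanks to the swap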
| 20 | 0 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer
lowercase_ = logging.get_logger(__name__)
lowercase_ = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
lowercase_ = {
"vocab_file": {
"junnyu/roformer_chinese_small": "https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt",
"junnyu/roformer_chinese_base": "https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt",
"junnyu/roformer_chinese_char_small": (
"https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt"
),
"junnyu/roformer_chinese_char_base": (
"https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt"
),
"junnyu/roformer_small_discriminator": (
"https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt"
),
"junnyu/roformer_small_generator": (
"https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt"
),
}
}
lowercase_ = {
"junnyu/roformer_chinese_small": 15_36,
"junnyu/roformer_chinese_base": 15_36,
"junnyu/roformer_chinese_char_small": 5_12,
"junnyu/roformer_chinese_char_base": 5_12,
"junnyu/roformer_small_discriminator": 1_28,
"junnyu/roformer_small_generator": 1_28,
}
lowercase_ = {
"junnyu/roformer_chinese_small": {"do_lower_case": True},
"junnyu/roformer_chinese_base": {"do_lower_case": True},
"junnyu/roformer_chinese_char_small": {"do_lower_case": True},
"junnyu/roformer_chinese_char_base": {"do_lower_case": True},
"junnyu/roformer_small_discriminator": {"do_lower_case": True},
"junnyu/roformer_small_generator": {"do_lower_case": True},
}
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase ):
A : Optional[int] = VOCAB_FILES_NAMES
A : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
A : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A : int = PRETRAINED_INIT_CONFIGURATION
A : List[str] = RoFormerTokenizer
def __init__( self : Optional[Any] , _lowerCAmelCase : Dict=None , _lowerCAmelCase : List[Any]=None , _lowerCAmelCase : List[Any]=True , _lowerCAmelCase : Any="[UNK]" , _lowerCAmelCase : int="[SEP]" , _lowerCAmelCase : Optional[int]="[PAD]" , _lowerCAmelCase : Optional[int]="[CLS]" , _lowerCAmelCase : Optional[Any]="[MASK]" , _lowerCAmelCase : Optional[Any]=True , _lowerCAmelCase : Optional[Any]=None , **_lowerCAmelCase : Dict , ):
super().__init__(
_lowerCAmelCase , tokenizer_file=_lowerCAmelCase , do_lower_case=_lowerCAmelCase , unk_token=_lowerCAmelCase , sep_token=_lowerCAmelCase , pad_token=_lowerCAmelCase , cls_token=_lowerCAmelCase , mask_token=_lowerCAmelCase , tokenize_chinese_chars=_lowerCAmelCase , strip_accents=_lowerCAmelCase , **_lowerCAmelCase , )
__snake_case : Dict = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
pre_tok_state.get("""lowercase""" , _lowerCAmelCase ) != do_lower_case
or pre_tok_state.get("""strip_accents""" , _lowerCAmelCase ) != strip_accents
):
__snake_case : Tuple = getattr(_lowerCAmelCase , pre_tok_state.pop("""type""" ) )
__snake_case : List[Any] = do_lower_case
__snake_case : Optional[Any] = strip_accents
__snake_case : List[str] = pre_tok_class(**_lowerCAmelCase )
__snake_case : Optional[Any] = do_lower_case
def __getstate__( self : Optional[Any] ):
__snake_case : Optional[int] = self.__dict__.copy()
__snake_case : Optional[Any] = BertPreTokenizer()
return state
def __setstate__( self : str , _lowerCAmelCase : Dict ):
__snake_case : str = d
__snake_case : int = self.__dict__["""_tokenizer"""].get_vocab()
__snake_case : List[str] = PreTokenizer.custom(JiebaPreTokenizer(_lowerCAmelCase ) )
def snake_case__ ( self : Union[str, Any] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Optional[int]=None ):
__snake_case : Dict = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def snake_case__ ( self : Union[str, Any] , _lowerCAmelCase : List[int] , _lowerCAmelCase : Optional[List[int]] = None ):
__snake_case : Optional[int] = [self.sep_token_id]
__snake_case : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def snake_case__ ( self : Optional[int] , _lowerCAmelCase : str , _lowerCAmelCase : Optional[str] = None ):
__snake_case : int = self._tokenizer.model.save(_lowerCAmelCase , name=_lowerCAmelCase )
return tuple(_lowerCAmelCase )
def snake_case__ ( self : int , _lowerCAmelCase : int , _lowerCAmelCase : Union[str, Any]=None , _lowerCAmelCase : Tuple=None , _lowerCAmelCase : Union[str, Any]=False , **_lowerCAmelCase : Tuple , ):
__snake_case : Tuple = BertPreTokenizer()
return super().save_pretrained(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , **_lowerCAmelCase )
| 352 | from __future__ import annotations
import math
def __lowerCAmelCase ( __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : bool , __SCREAMING_SNAKE_CASE : list[int] , __SCREAMING_SNAKE_CASE : float ):
'''simple docstring'''
if depth < 0:
raise ValueError("""Depth cannot be less than 0""" )
if len(__SCREAMING_SNAKE_CASE ) == 0:
raise ValueError("""Scores cannot be empty""" )
if depth == height:
return scores[node_index]
if is_max:
return max(
minimax(depth + 1 , node_index * 2 , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) , minimax(depth + 1 , node_index * 2 + 1 , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) , )
return min(
minimax(depth + 1 , node_index * 2 , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) , minimax(depth + 1 , node_index * 2 + 1 , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) , )
def __lowerCAmelCase ( ):
'''simple docstring'''
__snake_case : str = [9_0, 2_3, 6, 3_3, 2_1, 6_5, 1_2_3, 3_4_4_2_3]
__snake_case : Optional[Any] = math.log(len(__SCREAMING_SNAKE_CASE ) , 2 )
print("""Optimal value : """ , end="""""" )
print(minimax(0 , 0 , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
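# Worked trace for the scores above, assuming the root call is
# minimax(0, 0, True, scores, height) with height = log2(8) = 3:
#   depth 3 (leaves): 90 23 | 6 33 | 21 65 | 123 34423
#   depth 2 (max)   : 90      33     65      34423
#   depth 1 (min)   : 33             65
#   depth 0 (max)   : 65  -> prints "Optimal value : 65"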
| 20 | 0 |
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
lowercase_ = logging.get_logger(__name__)
@add_end_docstrings(__UpperCamelCase )
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase ):
def __init__( self : Optional[Any] , *_lowerCAmelCase : int , **_lowerCAmelCase : List[Any] ):
super().__init__(*_lowerCAmelCase , **_lowerCAmelCase )
requires_backends(self , """vision""" )
self.check_model_type(
TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
if self.framework == """tf"""
else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING )
def snake_case__ ( self : Optional[int] , _lowerCAmelCase : Any=None ):
__snake_case : Any = {}
if top_k is not None:
__snake_case : Optional[int] = top_k
return {}, {}, postprocess_params
def __call__( self : Any , _lowerCAmelCase : Union[str, List[str], "Image.Image", List["Image.Image"]] , **_lowerCAmelCase : Tuple ):
return super().__call__(_lowerCAmelCase , **_lowerCAmelCase )
def snake_case__ ( self : Optional[Any] , _lowerCAmelCase : Tuple ):
__snake_case : int = load_image(_lowerCAmelCase )
__snake_case : List[Any] = self.image_processor(images=_lowerCAmelCase , return_tensors=self.framework )
return model_inputs
def snake_case__ ( self : Any , _lowerCAmelCase : Optional[int] ):
__snake_case : int = self.model(**_lowerCAmelCase )
return model_outputs
def snake_case__ ( self : str , _lowerCAmelCase : Tuple , _lowerCAmelCase : Optional[int]=5 ):
if top_k > self.model.config.num_labels:
__snake_case : Optional[Any] = self.model.config.num_labels
if self.framework == "pt":
__snake_case : Any = model_outputs.logits.softmax(-1 )[0]
            __snake_case , __snake_case : List[Any] = probs.topk(_lowerCAmelCase )
elif self.framework == "tf":
__snake_case : Optional[Any] = stable_softmax(model_outputs.logits , axis=-1 )[0]
__snake_case : Union[str, Any] = tf.math.top_k(_lowerCAmelCase , k=_lowerCAmelCase )
            __snake_case , __snake_case : Tuple = topk.values.numpy(), topk.indices.numpy()
else:
raise ValueError(f'''Unsupported framework: {self.framework}''' )
__snake_case : Any = scores.tolist()
__snake_case : List[str] = ids.tolist()
return [{"score": score, "label": self.model.config.idalabel[_id]} for score, _id in zip(_lowerCAmelCase , _lowerCAmelCase )]
| 353 | import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
import PIL.Image
from .features import FeatureType
lowercase_ = None
lowercase_ = "<" if sys.byteorder == "little" else ">"
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126, minus "|i1", whose values are not preserved correctly when saving and loading an image
lowercase_ = [
np.dtype("|b1"),
np.dtype("|u1"),
np.dtype("<u2"),
np.dtype(">u2"),
np.dtype("<i2"),
np.dtype(">i2"),
np.dtype("<u4"),
np.dtype(">u4"),
np.dtype("<i4"),
np.dtype(">i4"),
np.dtype("<f4"),
np.dtype(">f4"),
np.dtype("<f8"),
np.dtype(">f8"),
]
@dataclass
class SCREAMING_SNAKE_CASE__ :
A : bool = True
A : Optional[str] = None
# Automatically constructed
A : ClassVar[str] = "PIL.Image.Image"
A : ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()} )
A : str = field(default="Image" , init=__UpperCamelCase , repr=__UpperCamelCase )
def __call__( self : Any ):
return self.pa_type
def snake_case__ ( self : List[Any] , _lowerCAmelCase : Union[str, bytes, dict, np.ndarray, "PIL.Image.Image"] ):
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("""To support encoding images, please install 'Pillow'.""" )
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
__snake_case : str = np.array(_lowerCAmelCase )
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
return {"path": value, "bytes": None}
elif isinstance(_lowerCAmelCase , _lowerCAmelCase ):
return {"path": None, "bytes": value}
elif isinstance(_lowerCAmelCase , np.ndarray ):
# convert the image array to PNG/TIFF bytes
return encode_np_array(_lowerCAmelCase )
elif isinstance(_lowerCAmelCase , PIL.Image.Image ):
# convert the PIL image to bytes (default format is PNG/TIFF)
return encode_pil_image(_lowerCAmelCase )
elif value.get("""path""" ) is not None and os.path.isfile(value["""path"""] ):
# we set "bytes": None to not duplicate the data if they're already available locally
return {"bytes": None, "path": value.get("""path""" )}
elif value.get("""bytes""" ) is not None or value.get("""path""" ) is not None:
# store the image bytes, and path is used to infer the image format using the file extension
return {"bytes": value.get("""bytes""" ), "path": value.get("""path""" )}
else:
raise ValueError(
f'''An image sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.''' )
def snake_case__ ( self : List[str] , _lowerCAmelCase : dict , _lowerCAmelCase : Dict=None ):
if not self.decode:
raise RuntimeError("""Decoding is disabled for this feature. Please use Image(decode=True) instead.""" )
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("""To support decoding images, please install 'Pillow'.""" )
if token_per_repo_id is None:
__snake_case : Tuple = {}
__snake_case , __snake_case : str = value["""path"""], value["""bytes"""]
if bytes_ is None:
if path is None:
raise ValueError(f'''An image should have one of \'path\' or \'bytes\' but both are None in {value}.''' )
else:
if is_local_path(_lowerCAmelCase ):
__snake_case : str = PIL.Image.open(_lowerCAmelCase )
else:
__snake_case : List[str] = path.split("""::""" )[-1]
try:
__snake_case : Dict = string_to_dict(_lowerCAmelCase , config.HUB_DATASETS_URL )["""repo_id"""]
__snake_case : int = token_per_repo_id.get(_lowerCAmelCase )
except ValueError:
__snake_case : List[Any] = None
with xopen(_lowerCAmelCase , """rb""" , use_auth_token=_lowerCAmelCase ) as f:
__snake_case : Union[str, Any] = BytesIO(f.read() )
__snake_case : Dict = PIL.Image.open(bytes_ )
else:
__snake_case : Optional[Any] = PIL.Image.open(BytesIO(bytes_ ) )
image.load() # to avoid "Too many open files" errors
return image
def snake_case__ ( self : Union[str, Any] ):
from .features import Value
return (
self
if self.decode
else {
"bytes": Value("""binary""" ),
"path": Value("""string""" ),
}
)
def snake_case__ ( self : Optional[int] , _lowerCAmelCase : Union[pa.StringArray, pa.StructArray, pa.ListArray] ):
if pa.types.is_string(storage.type ):
__snake_case : Optional[Any] = pa.array([None] * len(_lowerCAmelCase ) , type=pa.binary() )
__snake_case : Any = pa.StructArray.from_arrays([bytes_array, storage] , ["""bytes""", """path"""] , mask=storage.is_null() )
elif pa.types.is_binary(storage.type ):
__snake_case : Optional[Any] = pa.array([None] * len(_lowerCAmelCase ) , type=pa.string() )
__snake_case : List[str] = pa.StructArray.from_arrays([storage, path_array] , ["""bytes""", """path"""] , mask=storage.is_null() )
elif pa.types.is_struct(storage.type ):
if storage.type.get_field_index("""bytes""" ) >= 0:
__snake_case : List[str] = storage.field("""bytes""" )
else:
__snake_case : List[Any] = pa.array([None] * len(_lowerCAmelCase ) , type=pa.binary() )
if storage.type.get_field_index("""path""" ) >= 0:
__snake_case : Optional[int] = storage.field("""path""" )
else:
__snake_case : int = pa.array([None] * len(_lowerCAmelCase ) , type=pa.string() )
__snake_case : Tuple = pa.StructArray.from_arrays([bytes_array, path_array] , ["""bytes""", """path"""] , mask=storage.is_null() )
elif pa.types.is_list(storage.type ):
__snake_case : Optional[Any] = pa.array(
[encode_np_array(np.array(_lowerCAmelCase ) )["""bytes"""] if arr is not None else None for arr in storage.to_pylist()] , type=pa.binary() , )
__snake_case : Optional[int] = pa.array([None] * len(_lowerCAmelCase ) , type=pa.string() )
__snake_case : List[str] = pa.StructArray.from_arrays(
[bytes_array, path_array] , ["""bytes""", """path"""] , mask=bytes_array.is_null() )
return array_cast(_lowerCAmelCase , self.pa_type )
def snake_case__ ( self : Union[str, Any] , _lowerCAmelCase : pa.StructArray ):
@no_op_if_value_is_null
def path_to_bytes(_lowerCAmelCase : Tuple ):
with xopen(_lowerCAmelCase , """rb""" ) as f:
__snake_case : Optional[int] = f.read()
return bytes_
__snake_case : Tuple = pa.array(
[
(path_to_bytes(x["""path"""] ) if x["""bytes"""] is None else x["""bytes"""]) if x is not None else None
for x in storage.to_pylist()
] , type=pa.binary() , )
__snake_case : Optional[Any] = pa.array(
[os.path.basename(_lowerCAmelCase ) if path is not None else None for path in storage.field("""path""" ).to_pylist()] , type=pa.string() , )
__snake_case : Any = pa.StructArray.from_arrays([bytes_array, path_array] , ["""bytes""", """path"""] , mask=bytes_array.is_null() )
return array_cast(_lowerCAmelCase , self.pa_type )
def __lowerCAmelCase ( ):
'''simple docstring'''
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("""To support encoding images, please install 'Pillow'.""" )
global _IMAGE_COMPRESSION_FORMATS
if _IMAGE_COMPRESSION_FORMATS is None:
PIL.Image.init()
__snake_case : Optional[Any] = list(set(PIL.Image.OPEN.keys() ) & set(PIL.Image.SAVE.keys() ) )
return _IMAGE_COMPRESSION_FORMATS
def __lowerCAmelCase ( __SCREAMING_SNAKE_CASE : "PIL.Image.Image" ):
'''simple docstring'''
__snake_case : List[Any] = BytesIO()
if image.format in list_image_compression_formats():
__snake_case : Union[str, Any] = image.format
else:
__snake_case : List[Any] = """PNG""" if image.mode in ["""1""", """L""", """LA""", """RGB""", """RGBA"""] else """TIFF"""
image.save(__SCREAMING_SNAKE_CASE , format=__SCREAMING_SNAKE_CASE )
return buffer.getvalue()
def __lowerCAmelCase ( __SCREAMING_SNAKE_CASE : "PIL.Image.Image" ):
'''simple docstring'''
if hasattr(__SCREAMING_SNAKE_CASE , """filename""" ) and image.filename != "":
return {"path": image.filename, "bytes": None}
else:
return {"path": None, "bytes": image_to_bytes(__SCREAMING_SNAKE_CASE )}
def __lowerCAmelCase ( __SCREAMING_SNAKE_CASE : np.ndarray ):
'''simple docstring'''
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("""To support encoding images, please install 'Pillow'.""" )
__snake_case : List[Any] = array.dtype
__snake_case : List[Any] = dtype.byteorder if dtype.byteorder != """=""" else _NATIVE_BYTEORDER
__snake_case : Dict = dtype.kind
__snake_case : Union[str, Any] = dtype.itemsize
__snake_case : Tuple = None
# Multi-channel array case (only np.dtype("|u1") is allowed)
if array.shape[2:]:
__snake_case : int = np.dtype("""|u1""" )
if dtype_kind not in ["u", "i"]:
raise TypeError(
F'''Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays.''' )
if dtype is not dest_dtype:
warnings.warn(F'''Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'''' )
# Exact match
elif dtype in _VALID_IMAGE_ARRAY_DTPYES:
__snake_case : List[str] = dtype
else: # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
while dtype_itemsize >= 1:
__snake_case : int = dtype_byteorder + dtype_kind + str(__SCREAMING_SNAKE_CASE )
__snake_case : Any = np.dtype(__SCREAMING_SNAKE_CASE )
if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES:
warnings.warn(F'''Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'''' )
break
else:
dtype_itemsize //= 2
if dest_dtype is None:
raise TypeError(
F'''Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}''' )
__snake_case : Optional[int] = PIL.Image.fromarray(array.astype(__SCREAMING_SNAKE_CASE ) )
return {"path": None, "bytes": image_to_bytes(__SCREAMING_SNAKE_CASE )}
def __lowerCAmelCase ( __SCREAMING_SNAKE_CASE : Union[List[str], List[dict], List[np.ndarray], List["PIL.Image.Image"]] ):
'''simple docstring'''
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("""To support encoding images, please install 'Pillow'.""" )
if objs:
__snake_case , __snake_case : Any = first_non_null_value(__SCREAMING_SNAKE_CASE )
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
if isinstance(__SCREAMING_SNAKE_CASE , np.ndarray ):
__snake_case : int = no_op_if_value_is_null(__SCREAMING_SNAKE_CASE )
return [obj_to_image_dict_func(__SCREAMING_SNAKE_CASE ) for obj in objs]
elif isinstance(__SCREAMING_SNAKE_CASE , PIL.Image.Image ):
__snake_case : List[str] = no_op_if_value_is_null(__SCREAMING_SNAKE_CASE )
return [obj_to_image_dict_func(__SCREAMING_SNAKE_CASE ) for obj in objs]
else:
return objs
else:
return objs
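# Behaviour sketch for encode_np_array's dtype handling (illustrative, not from the
# module; assumes a little-endian machine): "<i8" has no exact match in
# _VALID_IMAGE_ARRAY_DTPYES, so the fallback loop halves the itemsize ("<i8" -> "<i4"),
# warns about the downcast, and encodes the result with Pillow.
#
#   import numpy as np
#   arr = np.arange(4, dtype=np.int64).reshape(2, 2)
#   encoded = encode_np_array(arr)  # warns: "Downcasting array dtype int64 to int32 ..."
#   assert encoded["path"] is None and encoded["bytes"] is not None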
| 20 | 0 |
import time
import warnings
from abc import ABC
from copy import deepcopy
from typing import Optional
import torch
from ..utils import add_start_docstrings, logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = r"\n Args:\n input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n scores (`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`):\n Prediction scores of a language modeling head. These can be scores for each vocabulary token before SoftMax\n or scores for each vocabulary token after SoftMax.\n kwargs (`Dict[str, Any]`, *optional*):\n Additional stopping criteria specific kwargs.\n\n Return:\n `bool`. `False` indicates we should continue, `True` indicates we should stop.\n\n"
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase ):
@add_start_docstrings(_lowerCAmelCase )
def __call__( self : Optional[Any] , _lowerCAmelCase : torch.LongTensor , _lowerCAmelCase : torch.FloatTensor , **_lowerCAmelCase : str ):
raise NotImplementedError("""StoppingCriteria needs to be subclassed""" )
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase ):
def __init__( self : List[str] , _lowerCAmelCase : int , _lowerCAmelCase : Optional[int] = None ):
__snake_case : str = max_length
__snake_case : Any = max_position_embeddings
@add_start_docstrings(_lowerCAmelCase )
def __call__( self : Tuple , _lowerCAmelCase : torch.LongTensor , _lowerCAmelCase : torch.FloatTensor , **_lowerCAmelCase : Union[str, Any] ):
__snake_case : int = input_ids.shape[-1]
__snake_case : Tuple = cur_len >= self.max_length
if self.max_position_embeddings is not None and not is_done and cur_len >= self.max_position_embeddings:
logger.warning_once(
"""This is a friendly reminder - the current text generation call will exceed the model's predefined """
f'''maximum length ({self.max_position_embeddings}). Depending on the model, you may observe '''
"""exceptions, performance degradation, or nothing at all.""" )
return is_done
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase ):
def __init__( self : List[str] , _lowerCAmelCase : int , _lowerCAmelCase : int ):
warnings.warn(
"""The class `MaxNewTokensCriteria` is deprecated. """
f'''Please use `MaxLengthCriteria(max_length={start_length + max_new_tokens})` '''
"""with `max_length = start_length + max_new_tokens` instead.""" , _lowerCAmelCase , )
__snake_case : Tuple = start_length
__snake_case : Dict = max_new_tokens
__snake_case : Any = start_length + max_new_tokens
@add_start_docstrings(_lowerCAmelCase )
def __call__( self : Tuple , _lowerCAmelCase : torch.LongTensor , _lowerCAmelCase : torch.FloatTensor , **_lowerCAmelCase : List[Any] ):
return input_ids.shape[-1] >= self.max_length
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase ):
def __init__( self : List[str] , _lowerCAmelCase : float , _lowerCAmelCase : Optional[float] = None ):
__snake_case : Union[str, Any] = max_time
__snake_case : List[Any] = time.time() if initial_timestamp is None else initial_timestamp
@add_start_docstrings(_lowerCAmelCase )
def __call__( self : int , _lowerCAmelCase : torch.LongTensor , _lowerCAmelCase : torch.FloatTensor , **_lowerCAmelCase : str ):
return time.time() - self.initial_timestamp > self.max_time
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase ):
@add_start_docstrings(_lowerCAmelCase )
def __call__( self : str , _lowerCAmelCase : torch.LongTensor , _lowerCAmelCase : torch.FloatTensor , **_lowerCAmelCase : List[str] ):
return any(criteria(_lowerCAmelCase , _lowerCAmelCase ) for criteria in self )
@property
def snake_case__ ( self : Union[str, Any] ):
for stopping_criterium in self:
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
return stopping_criterium.max_length
elif isinstance(_lowerCAmelCase , _lowerCAmelCase ):
return stopping_criterium.max_length
return None
def __lowerCAmelCase ( __SCREAMING_SNAKE_CASE : StoppingCriteriaList , __SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
__snake_case : List[Any] = stopping_criteria.max_length
__snake_case : Optional[Any] = deepcopy(__SCREAMING_SNAKE_CASE )
if stopping_max_length is not None and stopping_max_length != max_length:
warnings.warn("""You set different `max_length` for stopping criteria and `max_length` parameter""" , __SCREAMING_SNAKE_CASE )
elif stopping_max_length is None:
new_stopping_criteria.append(MaxLengthCriteria(max_length=__SCREAMING_SNAKE_CASE ) )
return new_stopping_criteria
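# Usage sketch (the classes above correspond to the transformers exports
# StoppingCriteriaList, MaxLengthCriteria and MaxTimeCriteria; the budget values are
# illustrative):
#
#   criteria = StoppingCriteriaList([
#       MaxLengthCriteria(max_length=30),
#       MaxTimeCriteria(max_time=5.0),
#   ])
#   outputs = model.generate(input_ids, stopping_criteria=criteria)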
| 354 | from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING
lowercase_ = logging.get_logger(__name__)
@add_end_docstrings(__UpperCamelCase )
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase ):
def __init__( self : Dict , *_lowerCAmelCase : Dict , **_lowerCAmelCase : int ):
super().__init__(*_lowerCAmelCase , **_lowerCAmelCase )
requires_backends(self , """vision""" )
self.check_model_type(
TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == """tf""" else MODEL_FOR_VISION_2_SEQ_MAPPING )
def snake_case__ ( self : List[Any] , _lowerCAmelCase : List[str]=None , _lowerCAmelCase : Any=None , _lowerCAmelCase : List[str]=None ):
__snake_case : Optional[Any] = {}
__snake_case : int = {}
if prompt is not None:
__snake_case : Dict = prompt
if generate_kwargs is not None:
__snake_case : List[Any] = generate_kwargs
if max_new_tokens is not None:
if "generate_kwargs" not in forward_kwargs:
__snake_case : Optional[int] = {}
if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
raise ValueError(
"""'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter,"""
""" please use only one""" )
__snake_case : Any = max_new_tokens
return preprocess_params, forward_kwargs, {}
def __call__( self : Optional[Any] , _lowerCAmelCase : Union[str, List[str], "Image.Image", List["Image.Image"]] , **_lowerCAmelCase : Union[str, Any] ):
return super().__call__(_lowerCAmelCase , **_lowerCAmelCase )
def snake_case__ ( self : Optional[Any] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : List[str]=None ):
__snake_case : Optional[Any] = load_image(_lowerCAmelCase )
if prompt is not None:
if not isinstance(_lowerCAmelCase , _lowerCAmelCase ):
raise ValueError(
f'''Received an invalid text input, got - {type(_lowerCAmelCase )} - but expected a single string. '''
"""Note also that one single text can be provided for conditional image to text generation.""" )
__snake_case : Tuple = self.model.config.model_type
if model_type == "git":
__snake_case : Tuple = self.image_processor(images=_lowerCAmelCase , return_tensors=self.framework )
__snake_case : Any = self.tokenizer(text=_lowerCAmelCase , add_special_tokens=_lowerCAmelCase ).input_ids
__snake_case : Tuple = [self.tokenizer.cls_token_id] + input_ids
__snake_case : int = torch.tensor(_lowerCAmelCase ).unsqueeze(0 )
model_inputs.update({"""input_ids""": input_ids} )
elif model_type == "pix2struct":
__snake_case : Dict = self.image_processor(images=_lowerCAmelCase , header_text=_lowerCAmelCase , return_tensors=self.framework )
elif model_type != "vision-encoder-decoder":
# vision-encoder-decoder does not support conditional generation
__snake_case : int = self.image_processor(images=_lowerCAmelCase , return_tensors=self.framework )
__snake_case : Optional[Any] = self.tokenizer(_lowerCAmelCase , return_tensors=self.framework )
model_inputs.update(_lowerCAmelCase )
else:
raise ValueError(f'''Model type {model_type} does not support conditional text generation''' )
else:
__snake_case : Tuple = self.image_processor(images=_lowerCAmelCase , return_tensors=self.framework )
if self.model.config.model_type == "git" and prompt is None:
__snake_case : int = None
return model_inputs
def snake_case__ ( self : Union[str, Any] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : str=None ):
        # Git model sets `model_inputs["input_ids"] = None` in `preprocess` (when `prompt=None`). In batch mode, the
        # pipeline will group them into a list of `None`, which fails `_forward`. Avoid this by checking it first.
if (
"input_ids" in model_inputs
and isinstance(model_inputs["""input_ids"""] , _lowerCAmelCase )
and all(x is None for x in model_inputs["""input_ids"""] )
):
__snake_case : List[Any] = None
if generate_kwargs is None:
__snake_case : Dict = {}
# FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
# parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
# the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
# in the `_prepare_model_inputs` method.
__snake_case : Dict = model_inputs.pop(self.model.main_input_name )
__snake_case : Optional[int] = self.model.generate(_lowerCAmelCase , **_lowerCAmelCase , **_lowerCAmelCase )
return model_outputs
def snake_case__ ( self : List[Any] , _lowerCAmelCase : str ):
__snake_case : Union[str, Any] = []
for output_ids in model_outputs:
__snake_case : Union[str, Any] = {
"""generated_text""": self.tokenizer.decode(
_lowerCAmelCase , skip_special_tokens=_lowerCAmelCase , )
}
records.append(_lowerCAmelCase )
return records
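# Usage sketch (the checkpoint is a real Hub model; the image path is a placeholder):
#
#   from transformers import pipeline
#   captioner = pipeline("image-to-text", model="Salesforce/blip-image-captioning-base")
#   captioner("photo.jpg", prompt="a photography of")
#   # -> [{"generated_text": "a photography of ..."}]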
| 20 | 0 |
import qiskit
def __lowerCAmelCase ( __SCREAMING_SNAKE_CASE : int = 2 ):
'''simple docstring'''
__snake_case : Tuple = qubits
# Using Aer's simulator
__snake_case : Dict = qiskit.Aer.get_backend("""aer_simulator""" )
# Creating a Quantum Circuit acting on the q register
__snake_case : List[Any] = qiskit.QuantumCircuit(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# Adding a H gate on qubit 0 (now q0 in superposition)
circuit.h(0 )
for i in range(1 , __SCREAMING_SNAKE_CASE ):
# Adding CX (CNOT) gate
circuit.cx(i - 1 , __SCREAMING_SNAKE_CASE )
# Mapping the quantum measurement to the classical bits
circuit.measure(list(range(__SCREAMING_SNAKE_CASE ) ) , list(range(__SCREAMING_SNAKE_CASE ) ) )
    # Measuring any one qubit now collapses the superposition of the other
    # qubits, leaving them in the same state as the measured one.
# Executing the circuit on the simulator
__snake_case : Optional[int] = qiskit.execute(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , shots=1_0_0_0 )
return job.result().get_counts(__SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
print(F'''Total count for various states are: {quantum_entanglement(3)}''')
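# Expected behaviour, up to shot noise: the H gate plus the CX chain prepare a GHZ state,
# so only the all-zeros and all-ones bitstrings are observed, each in roughly half of the
# 1000 shots, e.g. quantum_entanglement(3) -> {'000': 508, '111': 492}.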
| 355 | from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
lowercase_ = {"configuration_yolos": ["YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP", "YolosConfig", "YolosOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ["YolosFeatureExtractor"]
lowercase_ = ["YolosImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
"YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST",
"YolosForObjectDetection",
"YolosModel",
"YolosPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_yolos import YolosFeatureExtractor
from .image_processing_yolos import YolosImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_yolos import (
YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST,
YolosForObjectDetection,
YolosModel,
YolosPreTrainedModel,
)
else:
import sys
lowercase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
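# Usage note (illustrative): with the _LazyModule indirection above, symbols are resolved
# only on first attribute access, so importing the package stays cheap until a model class
# is actually touched:
#
#   from transformers import YolosForObjectDetection  # triggers the lazy import here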
| 20 | 0 |
import os
import unittest
from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer
from transformers.testing_utils import require_jieba, tooslow
from ...test_tokenization_common import TokenizerTesterMixin
@require_jieba
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase , unittest.TestCase ):
A : Optional[int] = CpmAntTokenizer
A : List[Any] = False
def snake_case__ ( self : Union[str, Any] ):
super().setUp()
__snake_case : Optional[Any] = [
"""<d>""",
"""</d>""",
"""<s>""",
"""</s>""",
"""</_>""",
"""<unk>""",
"""<pad>""",
"""</n>""",
"""我""",
"""是""",
"""C""",
"""P""",
"""M""",
"""A""",
"""n""",
"""t""",
]
__snake_case : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
@tooslow
def snake_case__ ( self : Tuple ):
tokenizer = CpmAntTokenizer.from_pretrained("""openbmb/cpm-ant-10b""" )
texts = """今天天气真好!"""
jieba_tokens = ["""今天""", """天气""", """真""", """好""", """!"""]
tokens = tokenizer.tokenize(texts )
self.assertListEqual(tokens , jieba_tokens )
normalized_text = """今天天气真好!"""
input_tokens = [tokenizer.bos_token] + tokens
input_ids = [6, 98_02, 1_49_62, 20_82, 8_31, 2_44]
self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , input_ids )
reconstructed_text = tokenizer.decode(input_ids )
self.assertEqual(reconstructed_text , normalized_text )
| 356 | # Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.speechta import SpeechTaForTextToSpeech, SpeechTaHifiGan, SpeechTaProcessor
from ..utils import is_datasets_available
from .base import PipelineTool
if is_datasets_available():
from datasets import load_dataset
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase ):
A : Optional[int] = "microsoft/speecht5_tts"
A : List[Any] = (
"This is a tool that reads an English text out loud. It takes an input named `text` which should contain the "
"text to read (in English) and returns a waveform object containing the sound."
)
A : str = "text_reader"
A : Optional[Any] = SpeechTaProcessor
A : Any = SpeechTaForTextToSpeech
A : Optional[Any] = SpeechTaHifiGan
A : str = ["text"]
A : Union[str, Any] = ["audio"]
def snake_case__ ( self : List[Any] ):
if self.post_processor is None:
__snake_case : Tuple = """microsoft/speecht5_hifigan"""
super().setup()
def snake_case__ ( self : Any , text , speaker_embeddings=None ):
inputs = self.pre_processor(text=text , return_tensors="""pt""" , truncation=True )
if speaker_embeddings is None:
if not is_datasets_available():
raise ImportError("""Datasets needs to be installed if not passing speaker embeddings.""" )
embeddings_dataset = load_dataset("""Matthijs/cmu-arctic-xvectors""" , split="""validation""" )
speaker_embeddings = torch.tensor(embeddings_dataset[73_05]["""xvector"""] ).unsqueeze(0 )
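# The default voice above is a single fixed xvector entry from the CMU ARCTIC
# speaker-embedding dataset, used when the caller does not supply embeddings.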
return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings}
def snake_case__ ( self : List[Any] , _lowerCAmelCase : Dict ):
with torch.no_grad():
return self.model.generate_speech(**_lowerCAmelCase )
def snake_case__ ( self : Union[str, Any] , _lowerCAmelCase : int ):
with torch.no_grad():
return self.post_processor(_lowerCAmelCase ).cpu().detach()
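# Hypothetical usage sketch (instance name assumed): constructing this tool and calling it,
# e.g. `audio = tool("""Hello world""")`, runs encode/forward/decode in sequence and returns
# a waveform tensor that can then be saved with an audio library such as soundfile.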
| 20 | 0 |
def join( separator : str , separated : list[str] ):
'''simple docstring'''
joined = """"""
for word_or_phrase in separated:
if not isinstance(word_or_phrase , str ):
raise Exception("""join() accepts only strings to be joined""" )
joined += word_or_phrase + separator
return joined.strip(separator )
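# Example: join("", ["a", "b", "c"]) returns "abc", and
# join(" ", ["You", "are", "amazing!"]) returns "You are amazing!".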
if __name__ == "__main__":
from doctest import testmod
testmod()
| 357 | import math
def real_power( apparent_power : float , power_factor : float ):
'''simple docstring'''
if (
not isinstance(power_factor , (int, float) )
or power_factor < -1
or power_factor > 1
):
raise ValueError("""power_factor must be a valid float value between -1 and 1.""" )
return apparent_power * power_factor
def reactive_power( apparent_power : float , power_factor : float ):
'''simple docstring'''
if (
not isinstance(power_factor , (int, float) )
or power_factor < -1
or power_factor > 1
):
raise ValueError("""power_factor must be a valid float value between -1 and 1.""" )
return apparent_power * math.sqrt(1 - power_factor**2 )
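# Example: real_power(100, 0.9) returns 90.0 and reactive_power(100, 0.9) returns roughly 43.59.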
if __name__ == "__main__":
import doctest
doctest.testmod()
| 20 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_funnel": ["FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP", "FunnelConfig"],
"convert_funnel_original_tf_checkpoint_to_pytorch": [],
"tokenization_funnel": ["FunnelTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ["FunnelTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_funnel"] = [
"FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
"FunnelBaseModel",
"FunnelForMaskedLM",
"FunnelForMultipleChoice",
"FunnelForPreTraining",
"FunnelForQuestionAnswering",
"FunnelForSequenceClassification",
"FunnelForTokenClassification",
"FunnelModel",
"FunnelPreTrainedModel",
"load_tf_weights_in_funnel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_tf_funnel"] = [
"TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFFunnelBaseModel",
"TFFunnelForMaskedLM",
"TFFunnelForMultipleChoice",
"TFFunnelForPreTraining",
"TFFunnelForQuestionAnswering",
"TFFunnelForSequenceClassification",
"TFFunnelForTokenClassification",
"TFFunnelModel",
"TFFunnelPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
from .tokenization_funnel import FunnelTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_funnel_fast import FunnelTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_funnel import (
FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
FunnelBaseModel,
FunnelForMaskedLM,
FunnelForMultipleChoice,
FunnelForPreTraining,
FunnelForQuestionAnswering,
FunnelForSequenceClassification,
FunnelForTokenClassification,
FunnelModel,
FunnelPreTrainedModel,
load_tf_weights_in_funnel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_funnel import (
TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
TFFunnelPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 358 | from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
FPaRecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
get_ccl_version,
is_4bit_bnb_available,
is_8bit_bnb_available,
is_aim_available,
is_bfaa_available,
is_bnb_available,
is_botoa_available,
is_ccl_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_fpa_available,
is_ipex_available,
is_megatron_lm_available,
is_mlflow_available,
is_mps_available,
is_npu_available,
is_rich_available,
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
convert_outputs_to_fpaa,
convert_to_fpaa,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
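# DeepSpeed-specific wrappers are only imported when deepspeed is installed,
# so the base utilities stay importable without that optional dependency.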
if is_deepspeed_available():
from .deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
HfDeepSpeedConfig,
)
from .bnb import has_abit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
prepare_sagemager_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
TaTrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers
| 20 | 0 |
import json
import os
import unittest
from transformers.models.roc_bert.tokenization_roc_bert import (
VOCAB_FILES_NAMES,
RoCBertBasicTokenizer,
RoCBertTokenizer,
RoCBertWordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase , unittest.TestCase ):
A : Any = RoCBertTokenizer
A : str = None
A : Union[str, Any] = False
A : int = True
A : str = filter_non_english
def snake_case__ ( self : Tuple ):
super().setUp()
__snake_case : Any = ["""[UNK]""", """[CLS]""", """[SEP]""", """[PAD]""", """[MASK]""", """你""", """好""", """是""", """谁""", """a""", """b""", """c""", """d"""]
__snake_case : List[Any] = {}
__snake_case : List[Any] = {}
for i, value in enumerate(_lowerCAmelCase ):
__snake_case : Optional[Any] = i
__snake_case : int = i
__snake_case : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
__snake_case : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""word_shape_file"""] )
__snake_case : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""word_pronunciation_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
with open(self.word_shape_file , """w""" , encoding="""utf-8""" ) as word_shape_writer:
json.dump(_lowerCAmelCase , _lowerCAmelCase , ensure_ascii=_lowerCAmelCase )
with open(self.word_pronunciation_file , """w""" , encoding="""utf-8""" ) as word_pronunciation_writer:
json.dump(_lowerCAmelCase , _lowerCAmelCase , ensure_ascii=_lowerCAmelCase )
def snake_case__ ( self : Union[str, Any] ):
__snake_case : Tuple = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
__snake_case : List[Any] = tokenizer.tokenize("""你好[SEP]你是谁""" )
self.assertListEqual(_lowerCAmelCase , ["""你""", """好""", """[SEP]""", """你""", """是""", """谁"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowerCAmelCase ) , [5, 6, 2, 5, 7, 8] )
self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(_lowerCAmelCase ) , [5, 6, 2, 5, 7, 8] )
self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(_lowerCAmelCase ) , [5, 6, 2, 5, 7, 8] )
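# With the toy vocab built in setUp, shape ids and pronunciation ids coincide with the
# plain vocab ids, since every token maps to its own index in all three files.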
def snake_case__ ( self : Any ):
__snake_case : Any = RoCBertBasicTokenizer()
self.assertListEqual(tokenizer.tokenize("""ah\u535A\u63A8zz""" ) , ["""ah""", """\u535A""", """\u63A8""", """zz"""] )
def snake_case__ ( self : Tuple ):
__snake_case : Tuple = RoCBertBasicTokenizer(do_lower_case=_lowerCAmelCase )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """ ) , ["""hello""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] )
def snake_case__ ( self : Tuple ):
__snake_case : List[Any] = RoCBertBasicTokenizer(do_lower_case=_lowerCAmelCase , strip_accents=_lowerCAmelCase )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""hällo""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""h\u00E9llo"""] )
def snake_case__ ( self : Tuple ):
__snake_case : Optional[Any] = RoCBertBasicTokenizer(do_lower_case=_lowerCAmelCase , strip_accents=_lowerCAmelCase )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""hallo""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] )
def snake_case__ ( self : Dict ):
__snake_case : Optional[int] = RoCBertBasicTokenizer(do_lower_case=_lowerCAmelCase )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""hallo""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] )
def snake_case__ ( self : List[Any] ):
__snake_case : str = RoCBertBasicTokenizer(do_lower_case=_lowerCAmelCase )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """ ) , ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
def snake_case__ ( self : Any ):
__snake_case : Optional[Any] = RoCBertBasicTokenizer(do_lower_case=_lowerCAmelCase , strip_accents=_lowerCAmelCase )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""HäLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
def snake_case__ ( self : str ):
__snake_case : Dict = RoCBertBasicTokenizer(do_lower_case=_lowerCAmelCase , strip_accents=_lowerCAmelCase )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""HaLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
def snake_case__ ( self : Dict ):
__snake_case : Optional[Any] = RoCBertBasicTokenizer(do_lower_case=_lowerCAmelCase , never_split=["""[UNK]"""] )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? [UNK]""" ) , ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?""", """[UNK]"""] )
def snake_case__ ( self : Union[str, Any] ):
__snake_case : str = ["""[UNK]""", """[CLS]""", """[SEP]""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing"""]
__snake_case : int = {}
for i, token in enumerate(_lowerCAmelCase ):
__snake_case : int = i
__snake_case : Union[str, Any] = RoCBertWordpieceTokenizer(vocab=_lowerCAmelCase , unk_token="""[UNK]""" )
self.assertListEqual(tokenizer.tokenize("""""" ) , [] )
self.assertListEqual(tokenizer.tokenize("""unwanted running""" ) , ["""un""", """##want""", """##ed""", """runn""", """##ing"""] )
self.assertListEqual(tokenizer.tokenize("""unwantedX running""" ) , ["""[UNK]""", """runn""", """##ing"""] )
def snake_case__ ( self : List[Any] ):
self.assertTrue(_is_whitespace(""" """ ) )
self.assertTrue(_is_whitespace("""\t""" ) )
self.assertTrue(_is_whitespace("""\r""" ) )
self.assertTrue(_is_whitespace("""\n""" ) )
self.assertTrue(_is_whitespace("""\u00A0""" ) )
self.assertFalse(_is_whitespace("""A""" ) )
self.assertFalse(_is_whitespace("""-""" ) )
def snake_case__ ( self : List[Any] ):
self.assertTrue(_is_control("""\u0005""" ) )
self.assertFalse(_is_control("""A""" ) )
self.assertFalse(_is_control(""" """ ) )
self.assertFalse(_is_control("""\t""" ) )
self.assertFalse(_is_control("""\r""" ) )
def snake_case__ ( self : str ):
self.assertTrue(_is_punctuation("""-""" ) )
self.assertTrue(_is_punctuation("""$""" ) )
self.assertTrue(_is_punctuation("""`""" ) )
self.assertTrue(_is_punctuation(""".""" ) )
self.assertFalse(_is_punctuation("""A""" ) )
self.assertFalse(_is_punctuation(""" """ ) )
def snake_case__ ( self : Union[str, Any] ):
__snake_case : List[str] = self.get_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(_lowerCAmelCase ) for t in ["""Test""", """\xad""", """test"""]] , [["""[UNK]"""], [], ["""[UNK]"""]] )
if self.test_rust_tokenizer:
__snake_case : Any = self.get_rust_tokenizer()
self.assertListEqual(
[rust_tokenizer.tokenize(_lowerCAmelCase ) for t in ["""Test""", """\xad""", """test"""]] , [["""[UNK]"""], [], ["""[UNK]"""]] )
def snake_case__ ( self : int ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
sentence = f'''A, naïve {tokenizer_r.mask_token} AllenNLP sentence.'''
tokens = tokenizer_r.encode_plus(
sentence , return_attention_mask=False , return_token_type_ids=False , return_offsets_mapping=True , add_special_tokens=True , )
do_lower_case = tokenizer_r.do_lower_case if hasattr(tokenizer_r , """do_lower_case""" ) else False
expected_results = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), """A"""),
((1, 2), ""","""),
((3, 5), """na"""),
((5, 6), """##ï"""),
((6, 8), """##ve"""),
((9, 15), tokenizer_r.mask_token),
((16, 21), """Allen"""),
((21, 23), """##NL"""),
((23, 24), """##P"""),
((25, 33), """sentence"""),
((33, 34), """."""),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), """a"""),
((1, 2), ""","""),
((3, 8), """naive"""),
((9, 15), tokenizer_r.mask_token),
((16, 21), """allen"""),
((21, 23), """##nl"""),
((23, 24), """##p"""),
((25, 33), """sentence"""),
((33, 34), """."""),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens["""input_ids"""] ) )
self.assertEqual([e[0] for e in expected_results] , tokens["""offset_mapping"""] )
def snake_case__ ( self : int ):
__snake_case : Any = ["""的""", """人""", """有"""]
__snake_case : Tuple = """""".join(_lowerCAmelCase )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
__snake_case : List[Any] = True
__snake_case : Optional[int] = self.tokenizer_class.from_pretrained(_lowerCAmelCase , **_lowerCAmelCase )
__snake_case : Dict = self.rust_tokenizer_class.from_pretrained(_lowerCAmelCase , **_lowerCAmelCase )
__snake_case : List[Any] = tokenizer_p.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase )
__snake_case : Optional[int] = tokenizer_r.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase )
__snake_case : Union[str, Any] = tokenizer_r.convert_ids_to_tokens(_lowerCAmelCase )
__snake_case : Optional[int] = tokenizer_p.convert_ids_to_tokens(_lowerCAmelCase )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
__snake_case : Any = False
__snake_case : List[str] = self.rust_tokenizer_class.from_pretrained(_lowerCAmelCase , **_lowerCAmelCase )
__snake_case : Union[str, Any] = self.tokenizer_class.from_pretrained(_lowerCAmelCase , **_lowerCAmelCase )
__snake_case : List[str] = tokenizer_r.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase )
__snake_case : Dict = tokenizer_p.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase )
__snake_case : Any = tokenizer_r.convert_ids_to_tokens(_lowerCAmelCase )
__snake_case : List[str] = tokenizer_p.convert_ids_to_tokens(_lowerCAmelCase )
# it is expected that only the first Chinese character is not preceded by "##".
__snake_case : Tuple = [
f'''##{token}''' if idx != 0 else token for idx, token in enumerate(_lowerCAmelCase )
]
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
@slow
def snake_case__ ( self : Optional[int] ):
tokenizer = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
text = tokenizer.encode("""你好""" , add_special_tokens=False )
text_a = tokenizer.encode("""你是谁""" , add_special_tokens=False )
encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_a )
assert encoded_sentence == [1] + text + [2]
assert encoded_pair == [1] + text + [2] + text_a + [2]
def snake_case__ ( self : Dict ):
__snake_case : Optional[int] = self.get_tokenizers(do_lower_case=_lowerCAmelCase )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
__snake_case : List[Any] = """你好,你是谁"""
__snake_case : List[str] = tokenizer.tokenize(_lowerCAmelCase )
__snake_case : List[Any] = tokenizer.convert_tokens_to_ids(_lowerCAmelCase )
__snake_case : List[str] = tokenizer.convert_tokens_to_shape_ids(_lowerCAmelCase )
__snake_case : Union[str, Any] = tokenizer.convert_tokens_to_pronunciation_ids(_lowerCAmelCase )
__snake_case : Any = tokenizer.prepare_for_model(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , add_special_tokens=_lowerCAmelCase )
__snake_case : Any = tokenizer.encode_plus(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase )
self.assertEqual(_lowerCAmelCase , _lowerCAmelCase )
| 359 | from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_blenderbot_small": [
"BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP",
"BlenderbotSmallConfig",
"BlenderbotSmallOnnxConfig",
],
"tokenization_blenderbot_small": ["BlenderbotSmallTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ["BlenderbotSmallTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_blenderbot_small"] = [
"BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST",
"BlenderbotSmallForCausalLM",
"BlenderbotSmallForConditionalGeneration",
"BlenderbotSmallModel",
"BlenderbotSmallPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_tf_blenderbot_small"] = [
"TFBlenderbotSmallForConditionalGeneration",
"TFBlenderbotSmallModel",
"TFBlenderbotSmallPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_flax_blenderbot_small"] = [
"FlaxBlenderbotSmallForConditionalGeneration",
"FlaxBlenderbotSmallModel",
"FlaxBlenderbotSmallPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotSmallConfig,
BlenderbotSmallOnnxConfig,
)
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_small_fast import BlenderbotSmallTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotSmallForCausalLM,
BlenderbotSmallForConditionalGeneration,
BlenderbotSmallModel,
BlenderbotSmallPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot_small import (
TFBlenderbotSmallForConditionalGeneration,
TFBlenderbotSmallModel,
TFBlenderbotSmallPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot_small import (
FlaxBlenderbotSmallForConditionalGeneration,
FlaxBlenderbotSmallModel,
FlaxBlenderbotSmallPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 20 | 0 |
from math import factorial
class SCREAMING_SNAKE_CASE__ :
def __init__( self : str , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : int ):
__snake_case : List[str] = real
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
__snake_case : Tuple = [1] * rank
else:
__snake_case : Optional[int] = rank
def __repr__( self : Optional[Any] ):
return (
f'''{self.real}+'''
f'''{'+'.join(str(_lowerCAmelCase )+'E'+str(n+1 )for n,dual in enumerate(self.duals ) )}'''
)
def snake_case__ ( self : List[Any] ):
__snake_case : Optional[Any] = self.duals.copy()
while cur[-1] == 0:
cur.pop(-1 )
return Dual(self.real , _lowerCAmelCase )
def __add__( self : Optional[int] , _lowerCAmelCase : Optional[int] ):
if not isinstance(_lowerCAmelCase , _lowerCAmelCase ):
return Dual(self.real + other , self.duals )
__snake_case : Dict = self.duals.copy()
__snake_case : int = other.duals.copy()
if len(_lowerCAmelCase ) > len(_lowerCAmelCase ):
o_dual.extend([1] * (len(_lowerCAmelCase ) - len(_lowerCAmelCase )) )
elif len(_lowerCAmelCase ) < len(_lowerCAmelCase ):
s_dual.extend([1] * (len(_lowerCAmelCase ) - len(_lowerCAmelCase )) )
__snake_case : List[Any] = []
for i in range(len(_lowerCAmelCase ) ):
new_duals.append(s_dual[i] + o_dual[i] )
return Dual(self.real + other.real , _lowerCAmelCase )
A : List[Any] = __add__
def __sub__( self : Dict , _lowerCAmelCase : List[Any] ):
return self + other * -1
def __mul__( self : List[str] , _lowerCAmelCase : Any ):
if not isinstance(_lowerCAmelCase , _lowerCAmelCase ):
__snake_case : Optional[int] = []
for i in self.duals:
new_duals.append(i * other )
return Dual(self.real * other , _lowerCAmelCase )
__snake_case : Optional[int] = [0] * (len(self.duals ) + len(other.duals ) + 1)
for i, item in enumerate(self.duals ):
for j, jtem in enumerate(other.duals ):
new_duals[i + j + 1] += item * jtem
for k in range(len(self.duals ) ):
new_duals[k] += self.duals[k] * other.real
for index in range(len(other.duals ) ):
new_duals[index] += other.duals[index] * self.real
return Dual(self.real * other.real , _lowerCAmelCase )
A : Optional[Any] = __mul__
def __truediv__( self : Tuple , _lowerCAmelCase : Optional[int] ):
if not isinstance(_lowerCAmelCase , _lowerCAmelCase ):
__snake_case : Tuple = []
for i in self.duals:
new_duals.append(i / other )
return Dual(self.real / other , _lowerCAmelCase )
raise ValueError
def __floordiv__( self : Union[str, Any] , _lowerCAmelCase : Optional[Any] ):
if not isinstance(_lowerCAmelCase , _lowerCAmelCase ):
__snake_case : Tuple = []
for i in self.duals:
new_duals.append(i // other )
return Dual(self.real // other , _lowerCAmelCase )
raise ValueError
def __pow__( self : List[str] , n : int ):
if n < 0 or isinstance(n , float ):
raise ValueError("""power must be a positive integer""" )
if n == 0:
return 1
if n == 1:
return self
__snake_case : Dict = self
for _ in range(n - 1 ):
x *= self
return x
def differentiate( func , position , order ):
'''simple docstring'''
if not callable(func ):
raise ValueError("""differentiate() requires a function as input for func""" )
if not isinstance(position , (float, int) ):
raise ValueError("""differentiate() requires a float as input for position""" )
if not isinstance(order , int ):
raise ValueError("""differentiate() requires an int as input for order""" )
d = Dual(position , 1 )
result = func(d )
if order == 0:
return result.real
return result.duals[order - 1] * factorial(order )
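# Example: differentiate(lambda x: x**2, 2, 2) computes the second derivative of x**2 at x = 2, i.e. 2.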
if __name__ == "__main__":
import doctest
doctest.testmod()
def f( y ):
'''simple docstring'''
return y**2 * y**4
print(differentiate(f, 9, 2))
| 360 | from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
is_vision_available,
)
lowercase_ = {"configuration_vit": ["VIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTConfig", "ViTOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ["ViTFeatureExtractor"]
lowercase_ = ["ViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_vit"] = [
"VIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"ViTForImageClassification",
"ViTForMaskedImageModeling",
"ViTModel",
"ViTPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_tf_vit"] = [
"TFViTForImageClassification",
"TFViTModel",
"TFViTPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_flax_vit"] = [
"FlaxViTForImageClassification",
"FlaxViTModel",
"FlaxViTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_vit import ViTFeatureExtractor
from .image_processing_vit import ViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit import (
VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTForImageClassification,
ViTForMaskedImageModeling,
ViTModel,
ViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 20 | 0 |
"""simple docstring"""
import argparse
import os
import re
import tensorflow as tf
import torch
from transformers import BertConfig, BertModel
from transformers.utils import logging
logging.set_verbosity_info()
lowercase_ = logging.get_logger(__name__)
def load_tfa_weights_in_bert( model , tf_checkpoint_path , config ):
'''simple docstring'''
tf_path = os.path.abspath(tf_checkpoint_path )
logger.info(F'''Converting TensorFlow checkpoint from {tf_path}''' )
# Load weights from TF model
init_vars = tf.train.list_variables(tf_path )
names = []
arrays = []
layer_depth = []
for full_name, shape in init_vars:
# logger.info(f"Loading TF weight {name} with shape {shape}")
__snake_case : str = full_name.split("""/""" )
if full_name == "_CHECKPOINTABLE_OBJECT_GRAPH" or name[0] in ["global_step", "save_counter"]:
logger.info(F'''Skipping non-model layer {full_name}''' )
continue
if "optimizer" in full_name:
logger.info(F'''Skipping optimization layer {full_name}''' )
continue
if name[0] == "model":
# ignore initial 'model'
name = name[1:]
# figure out how many levels deep the name is
depth = 0
for _name in name:
if _name.startswith("""layer_with_weights""" ):
depth += 1
else:
break
layer_depth.append(depth )
# read data
array = tf.train.load_variable(tf_path , full_name )
names.append("""/""".join(name ) )
arrays.append(array )
logger.info(F'''Read a total of {len(arrays ):,} layers''' )
# Sanity check
if len(set(layer_depth ) ) != 1:
raise ValueError(F'''Found layer names with different depths (layer depth {list(set(layer_depth ) )})''' )
layer_depth = list(set(layer_depth ) )[0]
if layer_depth != 1:
raise ValueError(
"""The model contains more than just the embedding/encoder layers. This script does not handle MLM/NSP"""
""" heads.""" )
# convert layers
logger.info("""Converting weights...""" )
for full_name, array in zip(names , arrays ):
name = full_name.split("""/""" )
pointer = model
trace = []
for i, m_name in enumerate(name ):
if m_name == ".ATTRIBUTES":
# variable names end with .ATTRIBUTES/VARIABLE_VALUE
break
if m_name.startswith("""layer_with_weights""" ):
__snake_case : Optional[int] = int(m_name.split("""-""" )[-1] )
if layer_num <= 2:
# embedding layers
# layer_num 0: word_embeddings
# layer_num 1: position_embeddings
# layer_num 2: token_type_embeddings
continue
elif layer_num == 3:
# embedding LayerNorm
trace.extend(["""embeddings""", """LayerNorm"""] )
__snake_case : str = getattr(__SCREAMING_SNAKE_CASE , """embeddings""" )
__snake_case : Optional[int] = getattr(__SCREAMING_SNAKE_CASE , """LayerNorm""" )
elif layer_num > 3 and layer_num < config.num_hidden_layers + 4:
# encoder layers
trace.extend(["""encoder""", """layer""", str(layer_num - 4 )] )
__snake_case : Any = getattr(__SCREAMING_SNAKE_CASE , """encoder""" )
__snake_case : Dict = getattr(__SCREAMING_SNAKE_CASE , """layer""" )
__snake_case : int = pointer[layer_num - 4]
elif layer_num == config.num_hidden_layers + 4:
# pooler layer
trace.extend(["""pooler""", """dense"""] )
__snake_case : List[Any] = getattr(__SCREAMING_SNAKE_CASE , """pooler""" )
__snake_case : int = getattr(__SCREAMING_SNAKE_CASE , """dense""" )
elif m_name == "embeddings":
trace.append("""embeddings""" )
__snake_case : Tuple = getattr(__SCREAMING_SNAKE_CASE , """embeddings""" )
if layer_num == 0:
trace.append("""word_embeddings""" )
__snake_case : List[str] = getattr(__SCREAMING_SNAKE_CASE , """word_embeddings""" )
elif layer_num == 1:
trace.append("""position_embeddings""" )
__snake_case : Union[str, Any] = getattr(__SCREAMING_SNAKE_CASE , """position_embeddings""" )
elif layer_num == 2:
trace.append("""token_type_embeddings""" )
__snake_case : Tuple = getattr(__SCREAMING_SNAKE_CASE , """token_type_embeddings""" )
else:
raise ValueError(F'''Unknown embedding layer with name {full_name}''' )
trace.append("""weight""" )
__snake_case : List[Any] = getattr(__SCREAMING_SNAKE_CASE , """weight""" )
elif m_name == "_attention_layer":
# self-attention layer
trace.extend(["""attention""", """self"""] )
__snake_case : str = getattr(__SCREAMING_SNAKE_CASE , """attention""" )
__snake_case : Optional[Any] = getattr(__SCREAMING_SNAKE_CASE , """self""" )
elif m_name == "_attention_layer_norm":
# output attention norm
trace.extend(["""attention""", """output""", """LayerNorm"""] )
__snake_case : Any = getattr(__SCREAMING_SNAKE_CASE , """attention""" )
__snake_case : Any = getattr(__SCREAMING_SNAKE_CASE , """output""" )
__snake_case : Dict = getattr(__SCREAMING_SNAKE_CASE , """LayerNorm""" )
elif m_name == "_attention_output_dense":
# output attention dense
trace.extend(["""attention""", """output""", """dense"""] )
__snake_case : int = getattr(__SCREAMING_SNAKE_CASE , """attention""" )
__snake_case : Optional[Any] = getattr(__SCREAMING_SNAKE_CASE , """output""" )
__snake_case : Union[str, Any] = getattr(__SCREAMING_SNAKE_CASE , """dense""" )
elif m_name == "_output_dense":
# output dense
trace.extend(["""output""", """dense"""] )
__snake_case : List[str] = getattr(__SCREAMING_SNAKE_CASE , """output""" )
__snake_case : Union[str, Any] = getattr(__SCREAMING_SNAKE_CASE , """dense""" )
elif m_name == "_output_layer_norm":
# output dense
trace.extend(["""output""", """LayerNorm"""] )
__snake_case : List[str] = getattr(__SCREAMING_SNAKE_CASE , """output""" )
__snake_case : Optional[Any] = getattr(__SCREAMING_SNAKE_CASE , """LayerNorm""" )
elif m_name == "_key_dense":
# attention key
trace.append("""key""" )
__snake_case : Optional[int] = getattr(__SCREAMING_SNAKE_CASE , """key""" )
elif m_name == "_query_dense":
# attention query
trace.append("""query""" )
__snake_case : Tuple = getattr(__SCREAMING_SNAKE_CASE , """query""" )
elif m_name == "_value_dense":
# attention value
trace.append("""value""" )
__snake_case : List[Any] = getattr(__SCREAMING_SNAKE_CASE , """value""" )
elif m_name == "_intermediate_dense":
# attention intermediate dense
trace.extend(["""intermediate""", """dense"""] )
__snake_case : int = getattr(__SCREAMING_SNAKE_CASE , """intermediate""" )
__snake_case : Optional[Any] = getattr(__SCREAMING_SNAKE_CASE , """dense""" )
elif m_name == "_output_layer_norm":
# output layer norm
trace.append("""output""" )
__snake_case : Any = getattr(__SCREAMING_SNAKE_CASE , """output""" )
# weights & biases
elif m_name in ["bias", "beta"]:
trace.append("""bias""" )
__snake_case : Optional[Any] = getattr(__SCREAMING_SNAKE_CASE , """bias""" )
elif m_name in ["kernel", "gamma"]:
trace.append("""weight""" )
__snake_case : Union[str, Any] = getattr(__SCREAMING_SNAKE_CASE , """weight""" )
else:
logger.warning(F'''Ignored {m_name}''' )
# for certain layers reshape is necessary
__snake_case : Any = """.""".join(__SCREAMING_SNAKE_CASE )
if re.match(R"""(\S+)\.attention\.self\.(key|value|query)\.(bias|weight)""" , __SCREAMING_SNAKE_CASE ) or re.match(
R"""(\S+)\.attention\.output\.dense\.weight""" , __SCREAMING_SNAKE_CASE ):
__snake_case : int = array.reshape(pointer.data.shape )
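# TF stores Dense kernels as (in_features, out_features); PyTorch nn.Linear keeps the
# transpose, so kernel arrays are transposed below before being copied in.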
if "kernel" in full_name:
__snake_case : Union[str, Any] = array.transpose()
if pointer.shape == array.shape:
__snake_case : Union[str, Any] = torch.from_numpy(__SCREAMING_SNAKE_CASE )
else:
raise ValueError(
F'''Shape mismatch in layer {full_name}: Model expects shape {pointer.shape} but layer contains shape:'''
F''' {array.shape}''' )
logger.info(F'''Successfully set variable {full_name} to PyTorch layer {trace}''' )
return model
def convert_tfa_checkpoint_to_pytorch( tf_checkpoint_path , config_path , pytorch_dump_path ):
'''simple docstring'''
logger.info(F'''Loading model based on config from {config_path}...''' )
config = BertConfig.from_json_file(config_path )
model = BertModel(config )
# Load weights from checkpoint
logger.info(F'''Loading weights from checkpoint {tf_checkpoint_path}...''' )
load_tfa_weights_in_bert(model , tf_checkpoint_path , config )
# Save pytorch-model
logger.info(F'''Saving PyTorch model to {pytorch_dump_path}...''' )
torch.save(model.state_dict() , pytorch_dump_path )
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser()
parser.add_argument(
"--tf_checkpoint_path", type=str, required=True, help="Path to the TensorFlow 2.x checkpoint path."
)
parser.add_argument(
"--bert_config_file",
type=str,
required=True,
help="The config json file corresponding to the BERT model. This specifies the model architecture.",
)
parser.add_argument(
"--pytorch_dump_path",
type=str,
required=True,
help="Path to the output PyTorch model (must include filename).",
)
lowercase_ = parser.parse_args()
convert_tfa_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
| 361 | from __future__ import annotations
def comp_and_swap( array : list[int] , index_1 : int , index_2 : int , direction : int ):
'''simple docstring'''
if (direction == 1 and array[index_1] > array[index_2]) or (
direction == 0 and array[index_1] < array[index_2]
):
array[index_1], array[index_2] = array[index_2], array[index_1]
def bitonic_merge( array : list[int] , low : int , length : int , direction : int ):
'''simple docstring'''
if length > 1:
middle = int(length / 2 )
for i in range(low , low + middle ):
comp_and_swap(array , i , i + middle , direction )
bitonic_merge(array , low , middle , direction )
bitonic_merge(array , low + middle , middle , direction )
def bitonic_sort( array : list[int] , low : int , length : int , direction : int ):
'''simple docstring'''
if length > 1:
middle = int(length / 2 )
bitonic_sort(array , low , middle , 1 )
bitonic_sort(array , low + middle , middle , 0 )
bitonic_merge(array , low , length , direction )
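# Note: bitonic sort assumes the number of elements is a power of two (e.g. 4 or 8 inputs).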
if __name__ == "__main__":
user_input = input("Enter numbers separated by a comma:\n").strip()
unsorted = [int(item.strip()) for item in user_input.split(",")]
bitonic_sort(unsorted, 0, len(unsorted), 1)
print("\nSorted array in ascending order is: ", end="")
print(*unsorted, sep=", ")
bitonic_merge(unsorted, 0, len(unsorted), 0)
print("Sorted array in descending order is: ", end="")
print(*unsorted, sep=", ")
| 20 | 0 |
"""simple docstring"""
import copy
import os
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
from datasets.arrow_writer import ArrowWriter, OptimizedTypedSequence, ParquetWriter, TypedSequence
from datasets.features import ArrayaD, ClassLabel, Features, Image, Value
from datasets.features.features import ArrayaDExtensionType, cast_to_python_objects
from datasets.keyhash import DuplicatedKeysError, InvalidKeyError
from .utils import require_pil
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase ):
def snake_case__ ( self : Optional[int] ):
__snake_case : Any = pa.array(TypedSequence([1, 2, 3] ) )
self.assertEqual(arr.type , pa.intaa() )
def snake_case__ ( self : str ):
with self.assertRaises(_lowerCAmelCase ):
__snake_case : Dict = pa.array(TypedSequence([1, 2, 3] ) , type=pa.intaa() )
def snake_case__ ( self : Tuple ):
with self.assertRaises(_lowerCAmelCase ):
__snake_case : str = pa.array(TypedSequence([1, 2, 3] , try_type=Value("""bool""" ) , type=Value("""int64""" ) ) )
def snake_case__ ( self : int ):
__snake_case : Optional[int] = pa.array(TypedSequence([1, 2, 3] , type=Value("""int32""" ) ) )
self.assertEqual(arr.type , pa.intaa() )
def snake_case__ ( self : Optional[int] ):
with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ):
__snake_case : Dict = pa.array(TypedSequence(["""foo""", """bar"""] , type=Value("""int64""" ) ) )
def snake_case__ ( self : Optional[int] ):
__snake_case : Any = pa.array(TypedSequence([1, 2, 3] , try_type=Value("""int32""" ) ) )
self.assertEqual(arr.type , pa.intaa() )
def snake_case__ ( self : Optional[Any] ):
__snake_case : Optional[int] = pa.array(TypedSequence(["""foo""", """bar"""] , try_type=Value("""int64""" ) ) )
self.assertEqual(arr.type , pa.string() )
def snake_case__ ( self : Any ):
__snake_case : Optional[Any] = pa.array(TypedSequence([[[1, 2, 3]]] , type=ArrayaD((1, 3) , """int64""" ) ) )
self.assertEqual(arr.type , ArrayaDExtensionType((1, 3) , """int64""" ) )
def snake_case__ ( self : List[Any] ):
with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ):
__snake_case : Optional[Any] = pa.array(TypedSequence(["""foo""", """bar"""] , type=ArrayaD((1, 3) , """int64""" ) ) )
def snake_case__ ( self : List[Any] ):
__snake_case : str = pa.array(TypedSequence([[[1, 2, 3]]] , try_type=ArrayaD((1, 3) , """int64""" ) ) )
self.assertEqual(arr.type , ArrayaDExtensionType((1, 3) , """int64""" ) )
def snake_case__ ( self : Tuple ):
__snake_case : List[Any] = pa.array(TypedSequence(["""foo""", """bar"""] , try_type=ArrayaD((1, 3) , """int64""" ) ) )
self.assertEqual(arr.type , pa.string() )
@require_pil
def snake_case__ ( self : List[str] ):
import PIL.Image
__snake_case : Dict = PIL.Image.fromarray(np.arange(10 , dtype=np.uinta ).reshape(2 , 5 ) )
with patch(
"""datasets.arrow_writer.cast_to_python_objects""" , side_effect=_lowerCAmelCase ) as mock_cast_to_python_objects:
__snake_case : int = pa.array(TypedSequence([{"""path""": None, """bytes""": b"""image_bytes"""}, pil_image] , type=Image() ) )
__snake_case : List[str] = mock_cast_to_python_objects.call_args_list[-1]
self.assertIn("""optimize_list_casting""" , _lowerCAmelCase )
self.assertFalse(kwargs["""optimize_list_casting"""] )
def _check_output( stream , expected_num_chunks : int ):
'''simple docstring'''
reader = pa.BufferReader(stream ) if isinstance(stream , pa.Buffer ) else pa.memory_map(stream )
f = pa.ipc.open_stream(reader )
pa_table: pa.Table = f.read_all()
assert len(pa_table.to_batches() ) == expected_num_chunks
assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
del pa_table
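# Helper used throughout the tests below: with writer_batch_size=1 every example is flushed
# as its own record batch, so expected_num_chunks equals the number of written examples.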
@pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 1_0] )
@pytest.mark.parametrize(
"""fields""" , [None, {"""col_1""": pa.string(), """col_2""": pa.intaa()}, {"""col_1""": pa.string(), """col_2""": pa.intaa()}] )
def __lowerCAmelCase ( __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : List[Any] ):
'''simple docstring'''
__snake_case : Dict = pa.BufferOutputStream()
__snake_case : List[str] = pa.schema(__SCREAMING_SNAKE_CASE ) if fields else None
with ArrowWriter(stream=__SCREAMING_SNAKE_CASE , schema=__SCREAMING_SNAKE_CASE , writer_batch_size=__SCREAMING_SNAKE_CASE ) as writer:
writer.write({"""col_1""": """foo""", """col_2""": 1} )
writer.write({"""col_1""": """bar""", """col_2""": 2} )
__snake_case : List[str] = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
__snake_case : List[str] = {"""col_1""": pa.string(), """col_2""": pa.intaa()}
assert writer._schema == pa.schema(__SCREAMING_SNAKE_CASE , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
def test_write_with_features():
'''simple docstring'''
output = pa.BufferOutputStream()
features = Features({"""labels""": ClassLabel(names=["""neg""", """pos"""] )} )
with ArrowWriter(stream=output , features=features ) as writer:
writer.write({"""labels""": 0} )
writer.write({"""labels""": 1} )
num_examples, num_bytes = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert writer._schema == features.arrow_schema
assert writer._schema.metadata == features.arrow_schema.metadata
reader = pa.BufferReader(output.getvalue() )
f = pa.ipc.open_stream(reader )
pa_table: pa.Table = f.read_all()
schema = pa_table.schema
assert pa_table.num_rows == 2
assert schema == features.arrow_schema
assert schema.metadata == features.arrow_schema.metadata
assert features == Features.from_arrow_schema(schema )
@pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 1_0] )
def __lowerCAmelCase ( __SCREAMING_SNAKE_CASE : List[str] ):
'''simple docstring'''
__snake_case : Any = pa.BufferOutputStream()
with ArrowWriter(
stream=__SCREAMING_SNAKE_CASE , writer_batch_size=__SCREAMING_SNAKE_CASE , hash_salt="""split_name""" , check_duplicates=__SCREAMING_SNAKE_CASE , ) as writer:
with pytest.raises(__SCREAMING_SNAKE_CASE ):
writer.write({"""col_1""": """foo""", """col_2""": 1} , key=[1, 2] )
__snake_case : str = writer.finalize()
@pytest.mark.parametrize("""writer_batch_size""" , [None, 2, 1_0] )
def __lowerCAmelCase ( __SCREAMING_SNAKE_CASE : Dict ):
'''simple docstring'''
__snake_case : str = pa.BufferOutputStream()
with ArrowWriter(
stream=__SCREAMING_SNAKE_CASE , writer_batch_size=__SCREAMING_SNAKE_CASE , hash_salt="""split_name""" , check_duplicates=__SCREAMING_SNAKE_CASE , ) as writer:
with pytest.raises(__SCREAMING_SNAKE_CASE ):
writer.write({"""col_1""": """foo""", """col_2""": 1} , key=1_0 )
writer.write({"""col_1""": """bar""", """col_2""": 2} , key=1_0 )
__snake_case : str = writer.finalize()
@pytest.mark.parametrize("""writer_batch_size""" , [None, 2, 1_0] )
def __lowerCAmelCase ( __SCREAMING_SNAKE_CASE : Optional[int] ):
'''simple docstring'''
__snake_case : str = pa.BufferOutputStream()
with ArrowWriter(
stream=__SCREAMING_SNAKE_CASE , writer_batch_size=__SCREAMING_SNAKE_CASE , hash_salt="""split_name""" , check_duplicates=__SCREAMING_SNAKE_CASE , ) as writer:
writer.write({"""col_1""": """foo""", """col_2""": 1} , key=1 )
writer.write({"""col_1""": """bar""", """col_2""": 2} , key=2 )
__snake_case : Optional[Any] = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 1_0] )
@pytest.mark.parametrize(
"""fields""" , [None, {"""col_1""": pa.string(), """col_2""": pa.intaa()}, {"""col_1""": pa.string(), """col_2""": pa.intaa()}] )
def __lowerCAmelCase ( __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Any ):
'''simple docstring'''
__snake_case : Dict = pa.BufferOutputStream()
__snake_case : Optional[int] = pa.schema(__SCREAMING_SNAKE_CASE ) if fields else None
with ArrowWriter(stream=__SCREAMING_SNAKE_CASE , schema=__SCREAMING_SNAKE_CASE , writer_batch_size=__SCREAMING_SNAKE_CASE ) as writer:
writer.write_batch({"""col_1""": ["""foo""", """bar"""], """col_2""": [1, 2]} )
writer.write_batch({"""col_1""": [], """col_2""": []} )
__snake_case : int = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
__snake_case : List[Any] = {"""col_1""": pa.string(), """col_2""": pa.intaa()}
assert writer._schema == pa.schema(__SCREAMING_SNAKE_CASE , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 1_0] )
@pytest.mark.parametrize(
"""fields""" , [None, {"""col_1""": pa.string(), """col_2""": pa.intaa()}, {"""col_1""": pa.string(), """col_2""": pa.intaa()}] )
def __lowerCAmelCase ( __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Any ):
'''simple docstring'''
__snake_case : int = pa.BufferOutputStream()
__snake_case : Any = pa.schema(__SCREAMING_SNAKE_CASE ) if fields else None
with ArrowWriter(stream=__SCREAMING_SNAKE_CASE , schema=__SCREAMING_SNAKE_CASE , writer_batch_size=__SCREAMING_SNAKE_CASE ) as writer:
writer.write_table(pa.Table.from_pydict({"""col_1""": ["""foo""", """bar"""], """col_2""": [1, 2]} ) )
__snake_case : Union[str, Any] = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
__snake_case : List[Any] = {"""col_1""": pa.string(), """col_2""": pa.intaa()}
assert writer._schema == pa.schema(__SCREAMING_SNAKE_CASE , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 1_0] )
@pytest.mark.parametrize(
"""fields""" , [None, {"""col_1""": pa.string(), """col_2""": pa.intaa()}, {"""col_1""": pa.string(), """col_2""": pa.intaa()}] )
def __lowerCAmelCase ( __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : str ):
'''simple docstring'''
__snake_case : str = pa.BufferOutputStream()
__snake_case : int = pa.schema(__SCREAMING_SNAKE_CASE ) if fields else None
with ArrowWriter(stream=__SCREAMING_SNAKE_CASE , schema=__SCREAMING_SNAKE_CASE , writer_batch_size=__SCREAMING_SNAKE_CASE ) as writer:
writer.write_row(pa.Table.from_pydict({"""col_1""": ["""foo"""], """col_2""": [1]} ) )
writer.write_row(pa.Table.from_pydict({"""col_1""": ["""bar"""], """col_2""": [2]} ) )
__snake_case : int = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
__snake_case : Any = {"""col_1""": pa.string(), """col_2""": pa.intaa()}
assert writer._schema == pa.schema(__SCREAMING_SNAKE_CASE , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
def __lowerCAmelCase ( ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
__snake_case : Union[str, Any] = {"""col_1""": pa.string(), """col_2""": pa.intaa()}
__snake_case : str = os.path.join(__SCREAMING_SNAKE_CASE , """test.arrow""" )
with ArrowWriter(path=__SCREAMING_SNAKE_CASE , schema=pa.schema(__SCREAMING_SNAKE_CASE ) ) as writer:
writer.write_batch({"""col_1""": ["""foo""", """bar"""], """col_2""": [1, 2]} )
__snake_case : Any = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert writer._schema == pa.schema(__SCREAMING_SNAKE_CASE , metadata=writer._schema.metadata )
_check_output(__SCREAMING_SNAKE_CASE , 1 )
def get_base_dtype( arr_type ):
'''simple docstring'''
if pa.types.is_list(arr_type ):
return get_base_dtype(arr_type.value_type )
else:
return arr_type
def change_first_primitive_element_in_list( lst , value ):
'''simple docstring'''
if isinstance(lst[0] , list ):
change_first_primitive_element_in_list(lst[0] , value )
else:
lst[0] = value
@pytest.mark.parametrize("""optimized_int_type, expected_dtype""" , [(None, pa.intaa()), (Value("""int32""" ), pa.intaa())] )
@pytest.mark.parametrize("""sequence""" , [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] )
def test_optimized_int_type_for_typed_sequence( sequence , optimized_int_type , expected_dtype ):
'''simple docstring'''
arr = pa.array(TypedSequence(sequence , optimized_int_type=optimized_int_type ) )
assert get_base_dtype(arr.type ) == expected_dtype
@pytest.mark.parametrize(
"""col, expected_dtype""" , [
("""attention_mask""", pa.inta()),
("""special_tokens_mask""", pa.inta()),
("""token_type_ids""", pa.inta()),
("""input_ids""", pa.intaa()),
("""other""", pa.intaa()),
] , )
@pytest.mark.parametrize("""sequence""" , [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] )
def __lowerCAmelCase ( __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Optional[Any] ):
'''simple docstring'''
__snake_case : Optional[int] = pa.array(OptimizedTypedSequence(__SCREAMING_SNAKE_CASE , col=__SCREAMING_SNAKE_CASE ) )
assert get_base_dtype(arr.type ) == expected_dtype
# not in range
if col != "other":
# avoids errors due to in-place modifications
__snake_case : Optional[Any] = copy.deepcopy(__SCREAMING_SNAKE_CASE )
__snake_case : Optional[int] = np.iinfo(expected_dtype.to_pandas_dtype() ).max + 1
change_first_primitive_element_in_list(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
__snake_case : Any = pa.array(OptimizedTypedSequence(__SCREAMING_SNAKE_CASE , col=__SCREAMING_SNAKE_CASE ) )
assert get_base_dtype(arr.type ) == pa.intaa()
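# Hedged aside: the fallback above triggers because the injected value exceeds the
# narrow dtype's range; for an int8-typed column, for example, the boundary is:
assert np.iinfo(np.int8).max + 1 == 128  # smallest value forcing the int64 fallback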
@pytest.mark.parametrize("""raise_exception""" , [False, True] )
def __lowerCAmelCase ( __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Any ):
'''simple docstring'''
__snake_case : int = str(tmp_path / """dataset-train.arrow""" )
try:
with ArrowWriter(path=__SCREAMING_SNAKE_CASE ) as writer:
if raise_exception:
raise pa.lib.ArrowInvalid()
else:
writer.stream.close()
except pa.lib.ArrowInvalid:
pass
finally:
assert writer.stream.closed
def __lowerCAmelCase ( __SCREAMING_SNAKE_CASE : Dict ):
'''simple docstring'''
__snake_case : Any = """mock://dataset-train.arrow"""
with ArrowWriter(path=__SCREAMING_SNAKE_CASE , storage_options=mockfs.storage_options ) as writer:
assert isinstance(writer._fs , type(__SCREAMING_SNAKE_CASE ) )
assert writer._fs.storage_options == mockfs.storage_options
writer.write({"""col_1""": """foo""", """col_2""": 1} )
writer.write({"""col_1""": """bar""", """col_2""": 2} )
__snake_case : Tuple = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert mockfs.exists(__SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( ):
'''simple docstring'''
__snake_case : Tuple = pa.BufferOutputStream()
with ParquetWriter(stream=__SCREAMING_SNAKE_CASE ) as writer:
writer.write({"""col_1""": """foo""", """col_2""": 1} )
writer.write({"""col_1""": """bar""", """col_2""": 2} )
__snake_case : Optional[Any] = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
__snake_case : str = pa.BufferReader(output.getvalue() )
__snake_case : pa.Table = pq.read_table(__SCREAMING_SNAKE_CASE )
assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
@require_pil
@pytest.mark.parametrize("""embed_local_files""" , [False, True] )
def __lowerCAmelCase ( __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Tuple ):
'''simple docstring'''
import PIL.Image
__snake_case : List[Any] = str(tmp_path / """test_image_rgb.jpg""" )
PIL.Image.fromarray(np.zeros((5, 5) , dtype=np.uinta ) ).save(__SCREAMING_SNAKE_CASE , format="""png""" )
__snake_case : List[str] = pa.BufferOutputStream()
with ParquetWriter(
stream=__SCREAMING_SNAKE_CASE , features=Features({"""image""": Image()} ) , embed_local_files=__SCREAMING_SNAKE_CASE ) as writer:
writer.write({"""image""": image_path} )
writer.finalize()
__snake_case : List[str] = pa.BufferReader(output.getvalue() )
__snake_case : pa.Table = pq.read_table(__SCREAMING_SNAKE_CASE )
__snake_case : List[str] = pa_table.to_pydict()
if embed_local_files:
assert isinstance(out["""image"""][0]["""path"""] , __SCREAMING_SNAKE_CASE )
with open(__SCREAMING_SNAKE_CASE , """rb""" ) as f:
assert out["image"][0]["bytes"] == f.read()
else:
assert out["image"][0]["path"] == image_path
assert out["image"][0]["bytes"] is None
def __lowerCAmelCase ( ):
'''simple docstring'''
__snake_case : Any = pa.schema([pa.field("""col_1""" , pa.string() , nullable=__SCREAMING_SNAKE_CASE )] )
__snake_case : Union[str, Any] = pa.BufferOutputStream()
with ArrowWriter(stream=__SCREAMING_SNAKE_CASE ) as writer:
writer._build_writer(inferred_schema=__SCREAMING_SNAKE_CASE )
assert writer._schema == pa.schema([pa.field("""col_1""" , pa.string() )] )
| 362 | import os
import zipfile
import pytest
from datasets.utils.extract import (
BzipaExtractor,
Extractor,
GzipExtractor,
LzaExtractor,
SevenZipExtractor,
TarExtractor,
XzExtractor,
ZipExtractor,
ZstdExtractor,
)
from .utils import require_lza, require_pyazr, require_zstandard
@pytest.mark.parametrize(
"""compression_format, is_archive""" , [
("""7z""", True),
("""bz2""", False),
("""gzip""", False),
("""lz4""", False),
("""tar""", True),
("""xz""", False),
("""zip""", True),
("""zstd""", False),
] , )
def __lowerCAmelCase ( __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Optional[Any] , ):
'''simple docstring'''
__snake_case : Optional[int] = {
"""7z""": (seven_zip_file, SevenZipExtractor),
"""bz2""": (bza_file, BzipaExtractor),
"""gzip""": (gz_file, GzipExtractor),
"""lz4""": (lza_file, LzaExtractor),
"""tar""": (tar_file, TarExtractor),
"""xz""": (xz_file, XzExtractor),
"""zip""": (zip_file, ZipExtractor),
"""zstd""": (zstd_file, ZstdExtractor),
}
__snake_case , __snake_case : Tuple = input_paths_and_base_extractors[compression_format]
if input_path is None:
__snake_case : Tuple = F'''for \'{compression_format}\' compression_format, '''
if compression_format == "7z":
reason += require_pyazr.kwargs["reason"]
elif compression_format == "lz4":
reason += require_lza.kwargs["reason"]
elif compression_format == "zstd":
reason += require_zstandard.kwargs["reason"]
pytest.skip(__SCREAMING_SNAKE_CASE )
assert base_extractor.is_extractable(__SCREAMING_SNAKE_CASE )
__snake_case : List[str] = tmp_path / ("""extracted""" if is_archive else """extracted.txt""")
base_extractor.extract(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
if is_archive:
assert output_path.is_dir()
for file_path in output_path.iterdir():
assert file_path.name == text_file.name
__snake_case : List[str] = file_path.read_text(encoding="""utf-8""" )
else:
__snake_case : Optional[Any] = output_path.read_text(encoding="""utf-8""" )
__snake_case : int = text_file.read_text(encoding="""utf-8""" )
assert extracted_file_content == expected_file_content
@pytest.mark.parametrize(
"""compression_format, is_archive""" , [
("""7z""", True),
("""bz2""", False),
("""gzip""", False),
("""lz4""", False),
("""tar""", True),
("""xz""", False),
("""zip""", True),
("""zstd""", False),
] , )
def __lowerCAmelCase ( __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Union[str, Any] , ):
'''simple docstring'''
__snake_case : Union[str, Any] = {
"""7z""": seven_zip_file,
"""bz2""": bza_file,
"""gzip""": gz_file,
"""lz4""": lza_file,
"""tar""": tar_file,
"""xz""": xz_file,
"""zip""": zip_file,
"""zstd""": zstd_file,
}
__snake_case : int = input_paths[compression_format]
if input_path is None:
__snake_case : int = F'''for \'{compression_format}\' compression_format, '''
if compression_format == "7z":
reason += require_pyazr.kwargs["reason"]
elif compression_format == "lz4":
reason += require_lza.kwargs["reason"]
elif compression_format == "zstd":
reason += require_zstandard.kwargs["reason"]
pytest.skip(__SCREAMING_SNAKE_CASE )
__snake_case : Any = Extractor.infer_extractor_format(__SCREAMING_SNAKE_CASE )
assert extractor_format is not None
__snake_case : Tuple = tmp_path / ("""extracted""" if is_archive else """extracted.txt""")
Extractor.extract(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
if is_archive:
assert output_path.is_dir()
for file_path in output_path.iterdir():
assert file_path.name == text_file.name
__snake_case : Union[str, Any] = file_path.read_text(encoding="""utf-8""" )
else:
__snake_case : Union[str, Any] = output_path.read_text(encoding="""utf-8""" )
__snake_case : Optional[Any] = text_file.read_text(encoding="""utf-8""" )
assert extracted_file_content == expected_file_content
@pytest.fixture
def __lowerCAmelCase ( __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Any ):
'''simple docstring'''
import tarfile
__snake_case : List[str] = tmp_path / """data_dot_dot"""
directory.mkdir()
__snake_case : Optional[Any] = directory / """tar_file_with_dot_dot.tar"""
with tarfile.TarFile(__SCREAMING_SNAKE_CASE , """w""" ) as f:
f.add(__SCREAMING_SNAKE_CASE , arcname=os.path.join("""..""" , text_file.name ) )
return path
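# Illustrative note on why this fixture is insecure: an arcname built with ".."
# resolves outside the extraction directory (the classic tar path-traversal
# pattern), which a safe extractor is expected to refuse and log as an error.
assert os.path.normpath(os.path.join("dest", "..", "evil.txt")) == "evil.txt"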
@pytest.fixture
def __lowerCAmelCase ( __SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
import tarfile
__snake_case : Dict = tmp_path / """data_sym_link"""
directory.mkdir()
__snake_case : Tuple = directory / """tar_file_with_sym_link.tar"""
os.symlink("""..""" , directory / """subdir""" , target_is_directory=__SCREAMING_SNAKE_CASE )
with tarfile.TarFile(__SCREAMING_SNAKE_CASE , """w""" ) as f:
f.add(str(directory / """subdir""" ) , arcname="""subdir""" ) # str required by os.readlink on Windows and Python < 3.8
return path
@pytest.mark.parametrize(
"""insecure_tar_file, error_log""" , [("""tar_file_with_dot_dot""", """illegal path"""), ("""tar_file_with_sym_link""", """Symlink""")] , )
def __lowerCAmelCase ( __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Optional[Any] ):
'''simple docstring'''
__snake_case : Any = {
"""tar_file_with_dot_dot""": tar_file_with_dot_dot,
"""tar_file_with_sym_link""": tar_file_with_sym_link,
}
__snake_case : int = insecure_tar_files[insecure_tar_file]
__snake_case : Optional[int] = tmp_path / """extracted"""
TarExtractor.extract(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
assert caplog.text
for record in caplog.records:
assert record.levelname == "ERROR"
assert error_log in record.msg
def __lowerCAmelCase ( __SCREAMING_SNAKE_CASE : Dict ):
'''simple docstring'''
    # We should have fewer false positives than zipfile.is_zipfile
# We do that by checking only the magic number
__snake_case : Optional[Any] = tmpdir / """not_a_zip_file"""
# From: https://github.com/python/cpython/pull/5053
__snake_case : List[str] = (
b"""\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x01\x00\x00"""
b"""\x00\x02\x08\x06\x00\x00\x00\x99\x81\xb6'\x00\x00\x00\x15I"""
b"""DATx\x01\x01\n\x00\xf5\xff\x00PK\x05\x06\x00PK\x06\x06\x07"""
b"""\xac\x01N\xc6|a\r\x00\x00\x00\x00IEND\xaeB`\x82"""
)
with not_a_zip_file.open("""wb""" ) as f:
f.write(__SCREAMING_SNAKE_CASE )
assert zipfile.is_zipfile(str(__SCREAMING_SNAKE_CASE ) ) # is a false positive for `zipfile`
assert not ZipExtractor.is_extractable(__SCREAMING_SNAKE_CASE ) # but we're right
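# Hedged sketch of a magic-number-only check like the one the test above relies on
# (an illustration of the idea, not necessarily the library's exact implementation):
def _looks_like_zip(path):
    with open(path, "rb") as f:
        magic = f.read(4)
    # local file header, empty archive, and spanned-archive signatures
    return magic in (b"PK\x03\x04", b"PK\x05\x06", b"PK\x07\x08")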
| 20 | 0 |
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class SCREAMING_SNAKE_CASE__ :
def __init__( self : Optional[int] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Optional[Any]=13 , _lowerCAmelCase : Optional[Any]=7 , _lowerCAmelCase : Optional[int]=True , _lowerCAmelCase : Tuple=True , _lowerCAmelCase : Optional[int]=True , _lowerCAmelCase : Union[str, Any]=True , _lowerCAmelCase : Any=99 , _lowerCAmelCase : Union[str, Any]=16 , _lowerCAmelCase : Optional[Any]=36 , _lowerCAmelCase : Dict=6 , _lowerCAmelCase : Dict=6 , _lowerCAmelCase : Tuple=6 , _lowerCAmelCase : Optional[Any]=37 , _lowerCAmelCase : Optional[int]="gelu" , _lowerCAmelCase : int=0.1 , _lowerCAmelCase : Optional[int]=0.1 , _lowerCAmelCase : Dict=5_12 , _lowerCAmelCase : Tuple=16 , _lowerCAmelCase : Any=2 , _lowerCAmelCase : Dict=0.02 , _lowerCAmelCase : Optional[int]=3 , _lowerCAmelCase : Union[str, Any]=4 , _lowerCAmelCase : str=None , ):
__snake_case : Any = parent
__snake_case : str = batch_size
__snake_case : List[str] = seq_length
__snake_case : str = is_training
__snake_case : Optional[int] = use_input_mask
__snake_case : Optional[Any] = use_token_type_ids
__snake_case : Optional[Any] = use_labels
__snake_case : str = vocab_size
__snake_case : int = embedding_size
__snake_case : Any = hidden_size
__snake_case : List[Any] = num_hidden_layers
__snake_case : Tuple = num_hidden_groups
__snake_case : List[str] = num_attention_heads
__snake_case : List[str] = intermediate_size
__snake_case : Dict = hidden_act
__snake_case : Optional[int] = hidden_dropout_prob
__snake_case : int = attention_probs_dropout_prob
__snake_case : Any = max_position_embeddings
__snake_case : List[str] = type_vocab_size
__snake_case : List[Any] = type_sequence_label_size
__snake_case : List[Any] = initializer_range
__snake_case : Tuple = num_labels
__snake_case : Tuple = num_choices
__snake_case : Dict = scope
def lowerCAmelCase__ ( self : int ):
__snake_case : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__snake_case : List[str] = None
if self.use_input_mask:
__snake_case : List[str] = random_attention_mask([self.batch_size, self.seq_length] )
__snake_case : List[Any] = None
if self.use_token_type_ids:
__snake_case : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__snake_case : List[Any] = None
__snake_case : Optional[Any] = None
__snake_case : Optional[Any] = None
if self.use_labels:
__snake_case : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__snake_case : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__snake_case : str = ids_tensor([self.batch_size] , self.num_choices )
__snake_case : List[Any] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCAmelCase__ ( self : Dict ):
return AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , num_hidden_groups=self.num_hidden_groups , )
def lowerCAmelCase__ ( self : Optional[int] , _lowerCAmelCase : List[str] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Tuple , _lowerCAmelCase : Tuple , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Any , _lowerCAmelCase : Optional[Any] ):
__snake_case : List[Any] = AlbertModel(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
__snake_case : Any = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase )
__snake_case : List[Any] = model(_lowerCAmelCase , token_type_ids=_lowerCAmelCase )
__snake_case : Tuple = model(_lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def lowerCAmelCase__ ( self : Dict , _lowerCAmelCase : Any , _lowerCAmelCase : int , _lowerCAmelCase : List[Any] , _lowerCAmelCase : str , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : int ):
__snake_case : Union[str, Any] = AlbertForPreTraining(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
__snake_case : Optional[int] = model(
_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase , labels=_lowerCAmelCase , sentence_order_label=_lowerCAmelCase , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.sop_logits.shape , (self.batch_size, config.num_labels) )
def lowerCAmelCase__ ( self : Union[str, Any] , _lowerCAmelCase : int , _lowerCAmelCase : List[str] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Any , _lowerCAmelCase : int , _lowerCAmelCase : List[Any] ):
__snake_case : Union[str, Any] = AlbertForMaskedLM(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
__snake_case : Union[str, Any] = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase , labels=_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCAmelCase__ ( self : Any , _lowerCAmelCase : List[str] , _lowerCAmelCase : int , _lowerCAmelCase : str , _lowerCAmelCase : List[str] , _lowerCAmelCase : List[str] , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : str ):
__snake_case : Tuple = AlbertForQuestionAnswering(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
__snake_case : Dict = model(
_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase , start_positions=_lowerCAmelCase , end_positions=_lowerCAmelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowerCAmelCase__ ( self : Any , _lowerCAmelCase : List[str] , _lowerCAmelCase : int , _lowerCAmelCase : str , _lowerCAmelCase : str , _lowerCAmelCase : Dict , _lowerCAmelCase : int , _lowerCAmelCase : List[str] ):
__snake_case : Dict = self.num_labels
__snake_case : Optional[Any] = AlbertForSequenceClassification(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
__snake_case : Any = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase , labels=_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCAmelCase__ ( self : Optional[Any] , _lowerCAmelCase : List[str] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Any , _lowerCAmelCase : Any , _lowerCAmelCase : int , _lowerCAmelCase : str , _lowerCAmelCase : str ):
__snake_case : List[str] = self.num_labels
__snake_case : Dict = AlbertForTokenClassification(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
__snake_case : List[str] = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase , labels=_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCAmelCase__ ( self : Any , _lowerCAmelCase : str , _lowerCAmelCase : Any , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Any , _lowerCAmelCase : int , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Tuple ):
__snake_case : Optional[Any] = self.num_choices
__snake_case : Union[str, Any] = AlbertForMultipleChoice(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
__snake_case : int = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__snake_case : Optional[Any] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__snake_case : int = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__snake_case : List[Any] = model(
_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase , labels=_lowerCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowerCAmelCase__ ( self : Optional[Any] ):
__snake_case : int = self.prepare_config_and_inputs()
        __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case : Any = config_and_inputs
__snake_case : Any = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
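# Hedged aside (illustration only; torch is imported above only when available): the
# unsqueeze/expand pattern in the multiple-choice check broadcasts (batch, seq_len)
# inputs across a num_choices dimension, e.g.
#   torch.arange(6).reshape(2, 3).unsqueeze(1).expand(-1, 4, -1).shape == (2, 4, 3)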
@require_torch
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase , __UpperCamelCase , unittest.TestCase ):
A : List[str] = (
(
AlbertModel,
AlbertForPreTraining,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertForQuestionAnswering,
)
if is_torch_available()
else ()
)
A : Any = (
{
"feature-extraction": AlbertModel,
"fill-mask": AlbertForMaskedLM,
"question-answering": AlbertForQuestionAnswering,
"text-classification": AlbertForSequenceClassification,
"token-classification": AlbertForTokenClassification,
"zero-shot": AlbertForSequenceClassification,
}
if is_torch_available()
else {}
)
A : Tuple = True
def lowerCAmelCase__ ( self : List[str] , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Any , _lowerCAmelCase : Optional[int]=False ):
__snake_case : Union[str, Any] = super()._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase , return_labels=_lowerCAmelCase )
if return_labels:
if model_class in get_values(_lowerCAmelCase ):
__snake_case : Optional[Any] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=_lowerCAmelCase )
__snake_case : Any = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_lowerCAmelCase )
return inputs_dict
def lowerCAmelCase__ ( self : Optional[Any] ):
__snake_case : Dict = AlbertModelTester(self )
__snake_case : Union[str, Any] = ConfigTester(self , config_class=_lowerCAmelCase , hidden_size=37 )
def lowerCAmelCase__ ( self : Optional[Any] ):
self.config_tester.run_common_tests()
def lowerCAmelCase__ ( self : Any ):
__snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCAmelCase )
def lowerCAmelCase__ ( self : Any ):
__snake_case : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*_lowerCAmelCase )
def lowerCAmelCase__ ( self : Optional[Any] ):
__snake_case : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_lowerCAmelCase )
def lowerCAmelCase__ ( self : str ):
__snake_case : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*_lowerCAmelCase )
def lowerCAmelCase__ ( self : Dict ):
__snake_case : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_lowerCAmelCase )
def lowerCAmelCase__ ( self : Union[str, Any] ):
__snake_case : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_lowerCAmelCase )
def lowerCAmelCase__ ( self : Union[str, Any] ):
__snake_case : Tuple = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
__snake_case : List[str] = type
self.model_tester.create_and_check_model(*_lowerCAmelCase )
@slow
def lowerCAmelCase__ ( self : Tuple ):
for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__snake_case : Optional[Any] = AlbertModel.from_pretrained(_lowerCAmelCase )
self.assertIsNotNone(_lowerCAmelCase )
@require_torch
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
@slow
def lowerCAmelCase__ ( self : int ):
__snake_case : str = AlbertModel.from_pretrained("""albert-base-v2""" )
__snake_case : Optional[Any] = torch.tensor([[0, 3_45, 2_32, 3_28, 7_40, 1_40, 16_95, 69, 60_78, 15_88, 2]] )
__snake_case : Optional[int] = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
__snake_case : Any = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase )[0]
__snake_case : Tuple = torch.Size((1, 11, 7_68) )
self.assertEqual(output.shape , _lowerCAmelCase )
__snake_case : List[str] = torch.tensor(
[[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , _lowerCAmelCase , atol=1e-4 ) )
| 363 | import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
@require_pyctcdecode
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def snake_case__ ( self : Any ):
__snake_case : Dict = """| <pad> <unk> <s> </s> a b c d e f g h i j k""".split()
__snake_case : str = dict(zip(_lowerCAmelCase , range(len(_lowerCAmelCase ) ) ) )
__snake_case : List[str] = {
"""unk_token""": """<unk>""",
"""bos_token""": """<s>""",
"""eos_token""": """</s>""",
}
__snake_case : str = {
"""feature_size""": 1,
"""padding_value""": 0.0,
"""sampling_rate""": 1_60_00,
"""return_attention_mask""": False,
"""do_normalize""": True,
}
__snake_case : Optional[Any] = tempfile.mkdtemp()
__snake_case : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
        __snake_case : Any = os.path.join(self.tmpdirname , FEATURE_EXTRACTOR_NAME )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(_lowerCAmelCase ) + """\n""" )
with open(self.feature_extraction_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(_lowerCAmelCase ) + """\n""" )
# load decoder from hub
__snake_case : Optional[int] = """hf-internal-testing/ngram-beam-search-decoder"""
def snake_case__ ( self : Optional[Any] , **_lowerCAmelCase : Tuple ):
__snake_case : int = self.add_kwargs_tokens_map.copy()
kwargs.update(_lowerCAmelCase )
return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname , **_lowerCAmelCase )
def snake_case__ ( self : Union[str, Any] , **_lowerCAmelCase : Optional[int] ):
return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname , **_lowerCAmelCase )
def snake_case__ ( self : Dict , **_lowerCAmelCase : Tuple ):
return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name , **_lowerCAmelCase )
def snake_case__ ( self : List[str] ):
shutil.rmtree(self.tmpdirname )
def snake_case__ ( self : Union[str, Any] ):
__snake_case : Union[str, Any] = self.get_tokenizer()
__snake_case : Tuple = self.get_feature_extractor()
__snake_case : Dict = self.get_decoder()
__snake_case : List[str] = WavaVecaProcessorWithLM(tokenizer=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , decoder=_lowerCAmelCase )
processor.save_pretrained(self.tmpdirname )
__snake_case : Dict = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname )
# tokenizer
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , _lowerCAmelCase )
# feature extractor
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , _lowerCAmelCase )
# decoder
self.assertEqual(processor.decoder._alphabet.labels , decoder._alphabet.labels )
self.assertEqual(
processor.decoder.model_container[decoder._model_key]._unigram_set , decoder.model_container[decoder._model_key]._unigram_set , )
self.assertIsInstance(processor.decoder , _lowerCAmelCase )
def snake_case__ ( self : Tuple ):
__snake_case : Tuple = WavaVecaProcessorWithLM(
tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
processor.save_pretrained(self.tmpdirname )
# make sure that error is thrown when decoder alphabet doesn't match
__snake_case : Optional[Any] = WavaVecaProcessorWithLM.from_pretrained(
self.tmpdirname , alpha=5.0 , beta=3.0 , score_boundary=-7.0 , unk_score_offset=3 )
# decoder
self.assertEqual(processor.language_model.alpha , 5.0 )
self.assertEqual(processor.language_model.beta , 3.0 )
self.assertEqual(processor.language_model.score_boundary , -7.0 )
self.assertEqual(processor.language_model.unk_score_offset , 3 )
def snake_case__ ( self : int ):
__snake_case : Tuple = self.get_tokenizer()
# add token to trigger raise
tokenizer.add_tokens(["""xx"""] )
with self.assertRaisesRegex(_lowerCAmelCase , """include""" ):
WavaVecaProcessorWithLM(
tokenizer=_lowerCAmelCase , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
def snake_case__ ( self : Dict ):
__snake_case : int = self.get_feature_extractor()
__snake_case : str = self.get_tokenizer()
__snake_case : Dict = self.get_decoder()
__snake_case : Any = WavaVecaProcessorWithLM(tokenizer=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , decoder=_lowerCAmelCase )
__snake_case : List[Any] = floats_list((3, 10_00) )
__snake_case : Optional[Any] = feature_extractor(_lowerCAmelCase , return_tensors="""np""" )
__snake_case : Tuple = processor(_lowerCAmelCase , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def snake_case__ ( self : Optional[int] ):
__snake_case : Any = self.get_feature_extractor()
__snake_case : Union[str, Any] = self.get_tokenizer()
__snake_case : int = self.get_decoder()
__snake_case : Optional[Any] = WavaVecaProcessorWithLM(tokenizer=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , decoder=_lowerCAmelCase )
__snake_case : Optional[int] = """This is a test string"""
__snake_case : Union[str, Any] = processor(text=_lowerCAmelCase )
__snake_case : Dict = tokenizer(_lowerCAmelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def snake_case__ ( self : List[str] , _lowerCAmelCase : List[Any]=(2, 10, 16) , _lowerCAmelCase : str=77 ):
np.random.seed(_lowerCAmelCase )
return np.random.rand(*_lowerCAmelCase )
def snake_case__ ( self : Tuple ):
__snake_case : List[str] = self.get_feature_extractor()
__snake_case : List[str] = self.get_tokenizer()
__snake_case : List[str] = self.get_decoder()
__snake_case : Optional[Any] = WavaVecaProcessorWithLM(tokenizer=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , decoder=_lowerCAmelCase )
__snake_case : Optional[int] = self._get_dummy_logits(shape=(10, 16) , seed=13 )
__snake_case : int = processor.decode(_lowerCAmelCase )
__snake_case : Optional[int] = decoder.decode_beams(_lowerCAmelCase )[0]
self.assertEqual(decoded_decoder[0] , decoded_processor.text )
self.assertEqual("""</s> <s> </s>""" , decoded_processor.text )
self.assertEqual(decoded_decoder[-2] , decoded_processor.logit_score )
self.assertEqual(decoded_decoder[-1] , decoded_processor.lm_score )
@parameterized.expand([[None], ["""fork"""], ["""spawn"""]] )
def snake_case__ ( self : List[str] , _lowerCAmelCase : List[str] ):
__snake_case : int = self.get_feature_extractor()
__snake_case : Union[str, Any] = self.get_tokenizer()
__snake_case : int = self.get_decoder()
__snake_case : Dict = WavaVecaProcessorWithLM(tokenizer=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , decoder=_lowerCAmelCase )
__snake_case : int = self._get_dummy_logits()
# note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
# otherwise, the LM won't be available to the pool's sub-processes.
# manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
if pool_context is None:
__snake_case : Tuple = processor.batch_decode(_lowerCAmelCase )
else:
with get_context(_lowerCAmelCase ).Pool() as pool:
__snake_case : int = processor.batch_decode(_lowerCAmelCase , _lowerCAmelCase )
__snake_case : int = list(_lowerCAmelCase )
with get_context("""fork""" ).Pool() as p:
__snake_case : Tuple = decoder.decode_beams_batch(_lowerCAmelCase , _lowerCAmelCase )
__snake_case , __snake_case , __snake_case : List[Any] = [], [], []
for beams in decoded_beams:
texts_decoder.append(beams[0][0] )
logit_scores_decoder.append(beams[0][-2] )
lm_scores_decoder.append(beams[0][-1] )
self.assertListEqual(_lowerCAmelCase , decoded_processor.text )
self.assertListEqual(["""<s> <s> </s>""", """<s> <s> <s>"""] , decoded_processor.text )
self.assertListEqual(_lowerCAmelCase , decoded_processor.logit_score )
self.assertListEqual(_lowerCAmelCase , decoded_processor.lm_score )
def snake_case__ ( self : Optional[int] ):
__snake_case : Optional[Any] = self.get_feature_extractor()
__snake_case : int = self.get_tokenizer()
__snake_case : str = self.get_decoder()
__snake_case : Optional[Any] = WavaVecaProcessorWithLM(tokenizer=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , decoder=_lowerCAmelCase )
__snake_case : int = self._get_dummy_logits()
__snake_case : List[str] = 15
__snake_case : Optional[Any] = -20.0
__snake_case : Tuple = -4.0
__snake_case : List[Any] = processor.batch_decode(
_lowerCAmelCase , beam_width=_lowerCAmelCase , beam_prune_logp=_lowerCAmelCase , token_min_logp=_lowerCAmelCase , )
__snake_case : List[str] = decoded_processor_out.text
__snake_case : str = list(_lowerCAmelCase )
with get_context("""fork""" ).Pool() as pool:
__snake_case : Dict = decoder.decode_beams_batch(
_lowerCAmelCase , _lowerCAmelCase , beam_width=_lowerCAmelCase , beam_prune_logp=_lowerCAmelCase , token_min_logp=_lowerCAmelCase , )
__snake_case : int = [d[0][0] for d in decoded_decoder_out]
__snake_case : List[Any] = [d[0][2] for d in decoded_decoder_out]
__snake_case : List[Any] = [d[0][3] for d in decoded_decoder_out]
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
self.assertListEqual(["""</s> <s> <s>""", """<s> <s> <s>"""] , _lowerCAmelCase )
self.assertTrue(np.array_equal(_lowerCAmelCase , decoded_processor_out.logit_score ) )
self.assertTrue(np.allclose([-20.054, -18.447] , _lowerCAmelCase , atol=1e-3 ) )
self.assertTrue(np.array_equal(_lowerCAmelCase , decoded_processor_out.lm_score ) )
self.assertTrue(np.allclose([-15.554, -13.9474] , _lowerCAmelCase , atol=1e-3 ) )
def snake_case__ ( self : Any ):
__snake_case : List[Any] = self.get_feature_extractor()
__snake_case : Any = self.get_tokenizer()
__snake_case : Union[str, Any] = self.get_decoder()
__snake_case : Dict = WavaVecaProcessorWithLM(tokenizer=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , decoder=_lowerCAmelCase )
__snake_case : Any = self._get_dummy_logits()
__snake_case : Any = 2.0
__snake_case : int = 5.0
__snake_case : Optional[int] = -20.0
__snake_case : Optional[int] = True
__snake_case : Any = processor.batch_decode(
_lowerCAmelCase , alpha=_lowerCAmelCase , beta=_lowerCAmelCase , unk_score_offset=_lowerCAmelCase , lm_score_boundary=_lowerCAmelCase , )
__snake_case : str = decoded_processor_out.text
__snake_case : int = list(_lowerCAmelCase )
decoder.reset_params(
alpha=_lowerCAmelCase , beta=_lowerCAmelCase , unk_score_offset=_lowerCAmelCase , lm_score_boundary=_lowerCAmelCase , )
with get_context("""fork""" ).Pool() as pool:
__snake_case : Tuple = decoder.decode_beams_batch(
_lowerCAmelCase , _lowerCAmelCase , )
__snake_case : int = [d[0][0] for d in decoded_decoder_out]
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
self.assertListEqual(["""<s> </s> <s> </s> </s>""", """</s> </s> <s> </s> </s>"""] , _lowerCAmelCase )
__snake_case : List[str] = processor.decoder.model_container[processor.decoder._model_key]
self.assertEqual(lm_model.alpha , 2.0 )
self.assertEqual(lm_model.beta , 5.0 )
self.assertEqual(lm_model.unk_score_offset , -20.0 )
self.assertEqual(lm_model.score_boundary , _lowerCAmelCase )
def snake_case__ ( self : Dict ):
__snake_case : Optional[Any] = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
__snake_case : Union[str, Any] = processor.decoder.model_container[processor.decoder._model_key]
__snake_case : Optional[int] = Path(language_model._kenlm_model.path.decode("""utf-8""" ) ).parent.parent.absolute()
__snake_case : Union[str, Any] = os.listdir(_lowerCAmelCase )
__snake_case : List[str] = ["""alphabet.json""", """language_model"""]
downloaded_decoder_files.sort()
expected_decoder_files.sort()
# test that only decoder relevant files from
# https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
# are downloaded and none of the rest (e.g. README.md, ...)
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
def snake_case__ ( self : Optional[Any] ):
__snake_case : Union[str, Any] = snapshot_download("""hf-internal-testing/processor_with_lm""" )
__snake_case : Dict = WavaVecaProcessorWithLM.from_pretrained(_lowerCAmelCase )
__snake_case : Optional[Any] = processor.decoder.model_container[processor.decoder._model_key]
__snake_case : Optional[int] = Path(language_model._kenlm_model.path.decode("""utf-8""" ) ).parent.parent.absolute()
__snake_case : List[str] = os.listdir(_lowerCAmelCase )
__snake_case : List[Any] = os.listdir(_lowerCAmelCase )
local_decoder_files.sort()
expected_decoder_files.sort()
        # test that the decoder from the hub and the local files in the cache are the same
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
def snake_case__ ( self : Optional[Any] ):
__snake_case : Optional[int] = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
__snake_case : str = AutoProcessor.from_pretrained("""hf-internal-testing/processor_with_lm""" )
__snake_case : Optional[int] = floats_list((3, 10_00) )
__snake_case : Union[str, Any] = processor_wavaveca(_lowerCAmelCase , return_tensors="""np""" )
__snake_case : Union[str, Any] = processor_auto(_lowerCAmelCase , return_tensors="""np""" )
for key in input_wavaveca.keys():
self.assertAlmostEqual(input_wavaveca[key].sum() , input_auto[key].sum() , delta=1e-2 )
__snake_case : Dict = self._get_dummy_logits()
__snake_case : List[Any] = processor_wavaveca.batch_decode(_lowerCAmelCase )
__snake_case : List[Any] = processor_auto.batch_decode(_lowerCAmelCase )
self.assertListEqual(decoded_wavaveca.text , decoded_auto.text )
def snake_case__ ( self : str ):
__snake_case : int = self.get_feature_extractor()
__snake_case : List[str] = self.get_tokenizer()
__snake_case : Optional[Any] = self.get_decoder()
__snake_case : Union[str, Any] = WavaVecaProcessorWithLM(tokenizer=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , decoder=_lowerCAmelCase )
self.assertListEqual(
processor.model_input_names , feature_extractor.model_input_names , msg="""`processor` and `feature_extractor` model input names do not match""" , )
@staticmethod
def snake_case__ ( _lowerCAmelCase : Any , _lowerCAmelCase : Tuple ):
__snake_case : Union[str, Any] = [d[key] for d in offsets]
return retrieved_list
def snake_case__ ( self : Dict ):
__snake_case : int = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
__snake_case : List[str] = self._get_dummy_logits()[0]
__snake_case : str = processor.decode(_lowerCAmelCase , output_word_offsets=_lowerCAmelCase )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue("""text""" in outputs )
self.assertTrue("""word_offsets""" in outputs )
self.assertTrue(isinstance(_lowerCAmelCase , _lowerCAmelCase ) )
self.assertEqual(""" """.join(self.get_from_offsets(outputs["""word_offsets"""] , """word""" ) ) , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""] , """word""" ) , ["""<s>""", """<s>""", """</s>"""] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""] , """start_offset""" ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""] , """end_offset""" ) , [1, 3, 5] )
def snake_case__ ( self : List[str] ):
__snake_case : Any = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
__snake_case : Optional[int] = self._get_dummy_logits()
__snake_case : int = processor.batch_decode(_lowerCAmelCase , output_word_offsets=_lowerCAmelCase )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue("""text""" in outputs )
self.assertTrue("""word_offsets""" in outputs )
self.assertTrue(isinstance(_lowerCAmelCase , _lowerCAmelCase ) )
self.assertListEqual(
[""" """.join(self.get_from_offsets(_lowerCAmelCase , """word""" ) ) for o in outputs["""word_offsets"""]] , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""][0] , """word""" ) , ["""<s>""", """<s>""", """</s>"""] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""][0] , """start_offset""" ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""][0] , """end_offset""" ) , [1, 3, 5] )
@slow
@require_torch
@require_torchaudio
def snake_case__ ( self : Optional[Any] ):
import torch
__snake_case : Optional[Any] = load_dataset("""common_voice""" , """en""" , split="""train""" , streaming=_lowerCAmelCase )
__snake_case : Any = ds.cast_column("""audio""" , datasets.Audio(sampling_rate=1_60_00 ) )
__snake_case : List[Any] = iter(_lowerCAmelCase )
__snake_case : Optional[int] = next(_lowerCAmelCase )
__snake_case : str = AutoProcessor.from_pretrained("""patrickvonplaten/wav2vec2-base-100h-with-lm""" )
__snake_case : str = WavaVecaForCTC.from_pretrained("""patrickvonplaten/wav2vec2-base-100h-with-lm""" )
# compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
__snake_case : List[str] = processor(sample["""audio"""]["""array"""] , return_tensors="""pt""" ).input_values
with torch.no_grad():
__snake_case : Dict = model(_lowerCAmelCase ).logits.cpu().numpy()
__snake_case : Any = processor.decode(logits[0] , output_word_offsets=_lowerCAmelCase )
__snake_case : Optional[Any] = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
__snake_case : Dict = [
{
"""start_time""": d["""start_offset"""] * time_offset,
"""end_time""": d["""end_offset"""] * time_offset,
"""word""": d["""word"""],
}
for d in output["""word_offsets"""]
]
__snake_case : Dict = """WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL"""
# output words
self.assertEqual(""" """.join(self.get_from_offsets(_lowerCAmelCase , """word""" ) ) , _lowerCAmelCase )
self.assertEqual(""" """.join(self.get_from_offsets(_lowerCAmelCase , """word""" ) ) , output.text )
# output times
__snake_case : Dict = torch.tensor(self.get_from_offsets(_lowerCAmelCase , """start_time""" ) )
__snake_case : Optional[Any] = torch.tensor(self.get_from_offsets(_lowerCAmelCase , """end_time""" ) )
# fmt: off
__snake_case : Optional[Any] = torch.tensor([1.4199, 1.6599, 2.2599, 3.0, 3.24, 3.5999, 3.7999, 4.0999, 4.26, 4.94, 5.28, 5.6599, 5.78, 5.94, 6.32, 6.5399, 6.6599] )
__snake_case : Optional[int] = torch.tensor([1.5399, 1.8999, 2.9, 3.16, 3.5399, 3.72, 4.0199, 4.1799, 4.76, 5.1599, 5.5599, 5.6999, 5.86, 6.1999, 6.38, 6.6199, 6.94] )
# fmt: on
self.assertTrue(torch.allclose(_lowerCAmelCase , _lowerCAmelCase , atol=0.01 ) )
self.assertTrue(torch.allclose(_lowerCAmelCase , _lowerCAmelCase , atol=0.01 ) )
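# Hedged aside on the time conversion in the last test: CTC frame offsets map to
# seconds via the model's downsampling ratio; for wav2vec2-base the conv strides
# multiply to 320, so at 16 kHz each logit frame spans 320 / 16_000 = 0.02 s.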
| 20 | 0 |
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
lowercase_ = logging.get_logger(__name__)
def __lowerCAmelCase ( __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Optional[Any]=False ):
'''simple docstring'''
__snake_case : List[str] = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'''blocks.{i}.norm1.weight''', F'''deit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((F'''blocks.{i}.norm1.bias''', F'''deit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append((F'''blocks.{i}.attn.proj.weight''', F'''deit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((F'''blocks.{i}.attn.proj.bias''', F'''deit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((F'''blocks.{i}.norm2.weight''', F'''deit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((F'''blocks.{i}.norm2.bias''', F'''deit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((F'''blocks.{i}.mlp.fc1.weight''', F'''deit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((F'''blocks.{i}.mlp.fc1.bias''', F'''deit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((F'''blocks.{i}.mlp.fc2.weight''', F'''deit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((F'''blocks.{i}.mlp.fc2.bias''', F'''deit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
("""cls_token""", """deit.embeddings.cls_token"""),
("""dist_token""", """deit.embeddings.distillation_token"""),
("""patch_embed.proj.weight""", """deit.embeddings.patch_embeddings.projection.weight"""),
("""patch_embed.proj.bias""", """deit.embeddings.patch_embeddings.projection.bias"""),
("""pos_embed""", """deit.embeddings.position_embeddings"""),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
("""pre_logits.fc.weight""", """pooler.dense.weight"""),
("""pre_logits.fc.bias""", """pooler.dense.bias"""),
] )
# if just the base model, we should remove "deit" from all keys that start with "deit"
__snake_case : str = [(pair[0], pair[1][4:]) if pair[1].startswith("""deit""" ) else pair for pair in rename_keys]
else:
# layernorm + classification heads
rename_keys.extend(
[
("""norm.weight""", """deit.layernorm.weight"""),
("""norm.bias""", """deit.layernorm.bias"""),
("""head.weight""", """cls_classifier.weight"""),
("""head.bias""", """cls_classifier.bias"""),
("""head_dist.weight""", """distillation_classifier.weight"""),
("""head_dist.bias""", """distillation_classifier.bias"""),
] )
return rename_keys
def __lowerCAmelCase ( __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Optional[int]=False ):
'''simple docstring'''
for i in range(config.num_hidden_layers ):
if base_model:
__snake_case : int = """"""
else:
__snake_case : Dict = """deit."""
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
__snake_case : Union[str, Any] = state_dict.pop(F'''blocks.{i}.attn.qkv.weight''' )
__snake_case : str = state_dict.pop(F'''blocks.{i}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
__snake_case : Any = in_proj_weight[
: config.hidden_size, :
]
__snake_case : int = in_proj_bias[: config.hidden_size]
__snake_case : Tuple = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
__snake_case : int = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
__snake_case : Any = in_proj_weight[
-config.hidden_size :, :
]
__snake_case : List[Any] = in_proj_bias[-config.hidden_size :]
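# Self-contained illustration (toy sizes) of the fused-QKV slicing above: timm packs
# the attention input projection as one (3*H, H) matrix whose row blocks are the
# query, key and value weights, in that order.
_w = torch.arange(12).reshape(6, 2)  # toy fused projection with hidden size H = 2
assert torch.equal(torch.cat([_w[:2], _w[2:4], _w[4:]]), _w)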
def __lowerCAmelCase ( __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : str ):
'''simple docstring'''
__snake_case : Tuple = dct.pop(__SCREAMING_SNAKE_CASE )
__snake_case : Dict = val
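# A minimal, illustrative equivalent of the helper above (hypothetical name):
def _rename_dict_key(d, old, new):
    d[new] = d.pop(old)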
def __lowerCAmelCase ( ):
'''simple docstring'''
__snake_case : Any = """http://images.cocodataset.org/val2017/000000039769.jpg"""
__snake_case : Any = Image.open(requests.get(__SCREAMING_SNAKE_CASE , stream=__SCREAMING_SNAKE_CASE ).raw )
return im
@torch.no_grad()
def __lowerCAmelCase ( __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : str ):
'''simple docstring'''
__snake_case : Optional[int] = DeiTConfig()
# all deit models have fine-tuned heads
__snake_case : List[Any] = False
# dataset (fine-tuned on ImageNet 2012), patch_size and image_size
__snake_case : Optional[Any] = 1_0_0_0
__snake_case : Union[str, Any] = """huggingface/label-files"""
__snake_case : Union[str, Any] = """imagenet-1k-id2label.json"""
__snake_case : Tuple = json.load(open(hf_hub_download(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , repo_type="""dataset""" ) , """r""" ) )
__snake_case : Union[str, Any] = {int(__SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()}
__snake_case : Dict = idalabel
__snake_case : Any = {v: k for k, v in idalabel.items()}
__snake_case : int = int(deit_name[-6:-4] )
__snake_case : Tuple = int(deit_name[-3:] )
# size of the architecture
if deit_name[9:].startswith("""tiny""" ):
__snake_case : List[str] = 1_9_2
__snake_case : Optional[int] = 7_6_8
__snake_case : Optional[Any] = 1_2
__snake_case : int = 3
elif deit_name[9:].startswith("""small""" ):
__snake_case : Union[str, Any] = 3_8_4
__snake_case : Optional[Any] = 1_5_3_6
__snake_case : List[Any] = 1_2
__snake_case : Dict = 6
if deit_name[9:].startswith("""base""" ):
pass
elif deit_name[4:].startswith("""large""" ):
__snake_case : int = 1_0_2_4
__snake_case : Tuple = 4_0_9_6
__snake_case : Union[str, Any] = 2_4
__snake_case : List[Any] = 1_6
# load original model from timm
__snake_case : Optional[int] = timm.create_model(__SCREAMING_SNAKE_CASE , pretrained=__SCREAMING_SNAKE_CASE )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
__snake_case : Union[str, Any] = timm_model.state_dict()
__snake_case : Union[str, Any] = create_rename_keys(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
for src, dest in rename_keys:
rename_key(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
read_in_q_k_v(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# load HuggingFace model
__snake_case : Any = DeiTForImageClassificationWithTeacher(__SCREAMING_SNAKE_CASE ).eval()
model.load_state_dict(__SCREAMING_SNAKE_CASE )
# Check outputs on an image, prepared by DeiTImageProcessor
__snake_case : Optional[Any] = int(
(2_5_6 / 2_2_4) * config.image_size ) # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
__snake_case : Optional[int] = DeiTImageProcessor(size=__SCREAMING_SNAKE_CASE , crop_size=config.image_size )
__snake_case : List[Any] = image_processor(images=prepare_img() , return_tensors="""pt""" )
__snake_case : List[Any] = encoding["""pixel_values"""]
__snake_case : str = model(__SCREAMING_SNAKE_CASE )
__snake_case : Any = timm_model(__SCREAMING_SNAKE_CASE )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(__SCREAMING_SNAKE_CASE , outputs.logits , atol=1E-3 )
Path(__SCREAMING_SNAKE_CASE ).mkdir(exist_ok=__SCREAMING_SNAKE_CASE )
print(F'''Saving model {deit_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(__SCREAMING_SNAKE_CASE )
print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(__SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--deit_name",
default="vit_deit_base_distilled_patch16_224",
type=str,
help="Name of the DeiT timm model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
lowercase_ = parser.parse_args()
convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
| 364 | import argparse
import re
import torch
from CLAP import create_model
from transformers import AutoFeatureExtractor, ClapConfig, ClapModel
lowercase_ = {
"text_branch": "text_model",
"audio_branch": "audio_model.audio_encoder",
"attn": "attention.self",
"self.proj": "output.dense",
"attention.self_mask": "attn_mask",
"mlp.fc1": "intermediate.dense",
"mlp.fc2": "output.dense",
"norm1": "layernorm_before",
"norm2": "layernorm_after",
"bn0": "batch_norm",
}
lowercase_ = AutoFeatureExtractor.from_pretrained("laion/clap-htsat-unfused", truncation="rand_trunc")
def __lowerCAmelCase ( __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Union[str, Any]=False ):
'''simple docstring'''
__snake_case , __snake_case : str = create_model(
"""HTSAT-tiny""" , """roberta""" , __SCREAMING_SNAKE_CASE , precision="""fp32""" , device="""cuda:0""" if torch.cuda.is_available() else """cpu""" , enable_fusion=__SCREAMING_SNAKE_CASE , fusion_type="""aff_2d""" if enable_fusion else None , )
return model, model_cfg
def __lowerCAmelCase ( __SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
__snake_case : Union[str, Any] = {}
__snake_case : List[Any] = R""".*sequential.(\d+).*"""
__snake_case : Union[str, Any] = R""".*_projection.(\d+).*"""
for key, value in state_dict.items():
# check if any key needs to be modified
for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
if key_to_modify in key:
__snake_case : Optional[Any] = key.replace(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
if re.match(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
# replace sequential layers with list
__snake_case : Optional[Any] = re.match(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).group(1 )
__snake_case : Dict = key.replace(F'''sequential.{sequential_layer}.''' , F'''layers.{int(__SCREAMING_SNAKE_CASE )//3}.linear.''' )
elif re.match(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
__snake_case : str = int(re.match(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).group(1 ) )
# Because in CLAP they use `nn.Sequential`...
__snake_case : List[Any] = 1 if projecton_layer == 0 else 2
__snake_case : Tuple = key.replace(F'''_projection.{projecton_layer}.''' , F'''_projection.linear{transformers_projection_layer}.''' )
if "audio" and "qkv" in key:
# split qkv into query key and value
__snake_case : Optional[int] = value
__snake_case : Any = mixed_qkv.size(0 ) // 3
__snake_case : List[Any] = mixed_qkv[:qkv_dim]
__snake_case : Tuple = mixed_qkv[qkv_dim : qkv_dim * 2]
__snake_case : List[Any] = mixed_qkv[qkv_dim * 2 :]
__snake_case : Any = query_layer
__snake_case : Dict = key_layer
__snake_case : Optional[Any] = value_layer
else:
__snake_case : List[str] = value
return model_state_dict
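# Hedged note on the `// 3` arithmetic above: CLAP's projection head presumably
# interleaves each Linear with two non-parametric modules inside nn.Sequential, so
# only every third sequential index carries weights, e.g. sequential.0 ->
# layers.0.linear and sequential.3 -> layers.1.linear.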
def __lowerCAmelCase ( __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Optional[Any]=False ):
'''simple docstring'''
__snake_case , __snake_case : List[str] = init_clap(__SCREAMING_SNAKE_CASE , enable_fusion=__SCREAMING_SNAKE_CASE )
clap_model.eval()
__snake_case : Tuple = clap_model.state_dict()
__snake_case : Union[str, Any] = rename_state_dict(__SCREAMING_SNAKE_CASE )
__snake_case : List[Any] = ClapConfig()
__snake_case : Tuple = enable_fusion
__snake_case : Any = ClapModel(__SCREAMING_SNAKE_CASE )
# ignore the spectrogram embedding layer
model.load_state_dict(__SCREAMING_SNAKE_CASE , strict=__SCREAMING_SNAKE_CASE )
model.save_pretrained(__SCREAMING_SNAKE_CASE )
transformers_config.save_pretrained(__SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument("--enable_fusion", action="store_true", help="Whether to enable fusion or not")
lowercase_ = parser.parse_args()
convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
| 20 | 0 |
from ..utils import DummyObject, requires_backends
class SCREAMING_SNAKE_CASE__ ( metaclass=__UpperCamelCase ):
A : List[Any] = ["flax", "transformers"]
def __init__( self : List[str] , *_lowerCAmelCase : List[Any] , **_lowerCAmelCase : Dict ):
requires_backends(self , ["""flax""", """transformers"""] )
@classmethod
def snake_case__ ( cls : int , *_lowerCAmelCase : str , **_lowerCAmelCase : int ):
requires_backends(cls , ["""flax""", """transformers"""] )
@classmethod
def snake_case__ ( cls : int , *_lowerCAmelCase : Optional[int] , **_lowerCAmelCase : Optional[int] ):
requires_backends(cls , ["""flax""", """transformers"""] )
class SCREAMING_SNAKE_CASE__ ( metaclass=__UpperCamelCase ):
A : Any = ["flax", "transformers"]
def __init__( self : int , *_lowerCAmelCase : Optional[int] , **_lowerCAmelCase : List[str] ):
requires_backends(self , ["""flax""", """transformers"""] )
@classmethod
def snake_case__ ( cls : int , *_lowerCAmelCase : Any , **_lowerCAmelCase : Union[str, Any] ):
requires_backends(cls , ["""flax""", """transformers"""] )
@classmethod
def snake_case__ ( cls : Tuple , *_lowerCAmelCase : List[Any] , **_lowerCAmelCase : List[str] ):
requires_backends(cls , ["""flax""", """transformers"""] )
class SCREAMING_SNAKE_CASE__ ( metaclass=__UpperCamelCase ):
A : Tuple = ["flax", "transformers"]
def __init__( self : Dict , *_lowerCAmelCase : Tuple , **_lowerCAmelCase : Union[str, Any] ):
requires_backends(self , ["""flax""", """transformers"""] )
@classmethod
def snake_case__ ( cls : int , *_lowerCAmelCase : int , **_lowerCAmelCase : Tuple ):
requires_backends(cls , ["""flax""", """transformers"""] )
@classmethod
def snake_case__ ( cls : Any , *_lowerCAmelCase : int , **_lowerCAmelCase : int ):
requires_backends(cls , ["""flax""", """transformers"""] )
class SCREAMING_SNAKE_CASE__ ( metaclass=__UpperCamelCase ):
A : Optional[Any] = ["flax", "transformers"]
def __init__( self : Any , *_lowerCAmelCase : Dict , **_lowerCAmelCase : int ):
requires_backends(self , ["""flax""", """transformers"""] )
@classmethod
def snake_case__ ( cls : Optional[int] , *_lowerCAmelCase : str , **_lowerCAmelCase : List[Any] ):
requires_backends(cls , ["""flax""", """transformers"""] )
@classmethod
def snake_case__ ( cls : List[str] , *_lowerCAmelCase : Union[str, Any] , **_lowerCAmelCase : List[Any] ):
requires_backends(cls , ["""flax""", """transformers"""] )
| 365 | import io
import os
import unicodedata
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = "▁"
lowercase_ = {"vocab_file": "vocab.txt", "sentencepiece_model_ckpt": "sentencepiece.bpe.model"}
lowercase_ = {
"sentencepiece_model_file": "sentencepiece.bpe.model",
"vocab_file": "vocab.txt",
}
lowercase_ = {
"vocab_file": {
"ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt",
"ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt",
},
"sentencepiece_model_file": {
"ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model",
"ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model",
},
}
lowercase_ = {
"ernie-m-base": 5_14,
"ernie-m-large": 5_14,
}
lowercase_ = {
"ernie-m-base": {"do_lower_case": False},
"ernie-m-large": {"do_lower_case": False},
}
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase ):
A : List[str] = ["input_ids"]
A : Tuple = VOCAB_FILES_NAMES
A : List[Any] = PRETRAINED_INIT_CONFIGURATION
A : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
A : Optional[Any] = RESOURCE_FILES_NAMES
def __init__( self : int , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Dict=None , _lowerCAmelCase : Dict=False , _lowerCAmelCase : List[Any]="utf8" , _lowerCAmelCase : Optional[Any]="[UNK]" , _lowerCAmelCase : Optional[int]="[SEP]" , _lowerCAmelCase : List[str]="[PAD]" , _lowerCAmelCase : Dict="[CLS]" , _lowerCAmelCase : List[Any]="[MASK]" , _lowerCAmelCase : Optional[Dict[str, Any]] = None , **_lowerCAmelCase : Any , ):
# Mask token behave like a normal word, i.e. include the space before it and
# is included in the raw text, there should be a match in a non-normalized sentence.
__snake_case : List[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=_lowerCAmelCase , unk_token=_lowerCAmelCase , sep_token=_lowerCAmelCase , pad_token=_lowerCAmelCase , cls_token=_lowerCAmelCase , mask_token=_lowerCAmelCase , vocab_file=_lowerCAmelCase , encoding=_lowerCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **_lowerCAmelCase , )
__snake_case : List[Any] = do_lower_case
__snake_case : Any = sentencepiece_model_ckpt
__snake_case : int = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(_lowerCAmelCase )
# to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning
if vocab_file is not None:
__snake_case : int = self.load_vocab(filepath=_lowerCAmelCase )
else:
__snake_case : Tuple = {self.sp_model.id_to_piece(_lowerCAmelCase ): id for id in range(self.sp_model.get_piece_size() )}
__snake_case : str = {v: k for k, v in self.vocab.items()}
def snake_case__ ( self : List[Any] , _lowerCAmelCase : List[Any] ):
if text is None:
return None
__snake_case : List[Any] = self.tokenize(_lowerCAmelCase )
__snake_case , __snake_case : Optional[Any] = """""", []
for i, ch in enumerate(_lowerCAmelCase ):
if ch in self.SP_CHAR_MAPPING:
__snake_case : Any = self.SP_CHAR_MAPPING.get(_lowerCAmelCase )
else:
__snake_case : Dict = unicodedata.normalize("""NFKC""" , _lowerCAmelCase )
if self.is_whitespace(_lowerCAmelCase ):
continue
normalized_text += ch
char_mapping.extend([i] * len(_lowerCAmelCase ) )
__snake_case , __snake_case , __snake_case : str = normalized_text, [], 0
if self.do_lower_case:
__snake_case : int = text.lower()
for token in split_tokens:
if token[:1] == "▁":
__snake_case : int = token[1:]
__snake_case : Optional[int] = text[offset:].index(_lowerCAmelCase ) + offset
__snake_case : int = start + len(_lowerCAmelCase )
token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1) )
__snake_case : str = end
return token_mapping
@property
def snake_case__ ( self : Tuple ):
return len(self.vocab )
def snake_case__ ( self : Dict ):
return dict(self.vocab , **self.added_tokens_encoder )
def __getstate__( self : int ):
__snake_case : str = self.__dict__.copy()
__snake_case : Union[str, Any] = None
return state
def __setstate__( self : Optional[Any] , _lowerCAmelCase : List[str] ):
__snake_case : Union[str, Any] = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
__snake_case : Optional[Any] = {}
__snake_case : Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.sentencepiece_model_ckpt )
def snake_case__ ( self : str , _lowerCAmelCase : Optional[int] ):
return "".join((self.SP_CHAR_MAPPING.get(_lowerCAmelCase , _lowerCAmelCase ) for c in text) )
def snake_case__ ( self : Optional[Any] , _lowerCAmelCase : str , _lowerCAmelCase : Tuple=False , _lowerCAmelCase : Optional[Any]=64 , _lowerCAmelCase : Optional[int]=0.1 ):
if self.sp_model_kwargs.get("""enable_sampling""" ) is True:
__snake_case : List[str] = True
if self.sp_model_kwargs.get("""alpha""" ) is not None:
__snake_case : Dict = self.sp_model_kwargs.get("""alpha""" )
if self.sp_model_kwargs.get("""nbest_size""" ) is not None:
__snake_case : List[Any] = self.sp_model_kwargs.get("""nbest_size""" )
if not enable_sampling:
__snake_case : str = self.sp_model.EncodeAsPieces(_lowerCAmelCase )
else:
__snake_case : Tuple = self.sp_model.SampleEncodeAsPieces(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
__snake_case : Union[str, Any] = []
for pi, piece in enumerate(_lowerCAmelCase ):
if piece == SPIECE_UNDERLINE:
if not pieces[pi + 1].startswith(_lowerCAmelCase ) and pi != 0:
new_pieces.append(_lowerCAmelCase )
continue
else:
continue
__snake_case : Optional[int] = 0
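            # further split the piece at CJK characters, punctuation marks and digit boundaries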
for i, chunk in enumerate(_lowerCAmelCase ):
if chunk == SPIECE_UNDERLINE:
continue
if self.is_ch_char(_lowerCAmelCase ) or self.is_punct(_lowerCAmelCase ):
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
new_pieces.append(_lowerCAmelCase )
__snake_case : Tuple = i + 1
elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit():
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
__snake_case : List[str] = i
elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit():
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
__snake_case : Tuple = i
if len(_lowerCAmelCase ) > lst_i:
new_pieces.append(piece[lst_i:] )
return new_pieces
def snake_case__ ( self : Union[str, Any] , _lowerCAmelCase : int ):
__snake_case : int = """""".join(_lowerCAmelCase ).replace(_lowerCAmelCase , """ """ ).strip()
return out_string
def snake_case__ ( self : Dict , _lowerCAmelCase : Tuple ):
__snake_case : int = self.convert_ids_to_tokens(_lowerCAmelCase )
__snake_case : Any = """""".join(_lowerCAmelCase ).replace(_lowerCAmelCase , """ """ ).strip()
return out_string
def snake_case__ ( self : Dict , _lowerCAmelCase : Tuple ):
return self.vocab.get(_lowerCAmelCase , self.vocab.get(self.unk_token ) )
def snake_case__ ( self : Union[str, Any] , _lowerCAmelCase : Dict ):
return self.reverse_vocab.get(_lowerCAmelCase , self.unk_token )
def snake_case__ ( self : Any , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Union[str, Any]=None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
__snake_case : Union[str, Any] = [self.cls_token_id]
__snake_case : Optional[Any] = [self.sep_token_id]
return _cls + token_ids_a + _sep + _sep + token_ids_a + _sep
def snake_case__ ( self : List[str] , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Optional[Any]=None ):
if offset_mapping_a is None:
return [(0, 0)] + offset_mapping_a + [(0, 0)]
return [(0, 0)] + offset_mapping_a + [(0, 0), (0, 0)] + offset_mapping_a + [(0, 0)]
def snake_case__ ( self : Optional[Any] , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Optional[Any]=None , _lowerCAmelCase : Optional[Any]=False ):
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
"""You should not supply a second sequence if the provided sequence of """
"""ids is already formatted with special tokens for the model.""" )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(_lowerCAmelCase )) + [1, 1] + ([0] * len(_lowerCAmelCase )) + [1]
return [1] + ([0] * len(_lowerCAmelCase )) + [1]
def snake_case__ ( self : Optional[int] , _lowerCAmelCase : List[int] , _lowerCAmelCase : Optional[List[int]] = None ):
# called when `add_special_tokens` is True, so align with `build_inputs_with_special_tokens` method
if token_ids_a is None:
# [CLS] X [SEP]
return (len(_lowerCAmelCase ) + 2) * [0]
# [CLS] A [SEP] [SEP] B [SEP]
return [0] * (len(_lowerCAmelCase ) + 1) + [1] * (len(_lowerCAmelCase ) + 3)
def snake_case__ ( self : Tuple , _lowerCAmelCase : List[str] ):
if "\u4e00" <= char <= "\u9fff":
return True
return False
def snake_case__ ( self : List[str] , _lowerCAmelCase : Any ):
if ("a" <= char <= "z") or ("A" <= char <= "Z"):
return True
return False
def snake_case__ ( self : int , _lowerCAmelCase : List[Any] ):
if char in ",;:.?!~,;:。?!《》【】":
return True
return False
def snake_case__ ( self : str , _lowerCAmelCase : Optional[Any] ):
if char == " " or char == "\t" or char == "\n" or char == "\r":
return True
if len(_lowerCAmelCase ) == 1:
__snake_case : Dict = unicodedata.category(_lowerCAmelCase )
if cat == "Zs":
return True
return False
def snake_case__ ( self : str , _lowerCAmelCase : List[Any] ):
__snake_case : Dict = {}
with io.open(_lowerCAmelCase , """r""" , encoding="""utf-8""" ) as f:
for index, line in enumerate(_lowerCAmelCase ):
__snake_case : Tuple = line.rstrip("""\n""" )
__snake_case : List[str] = int(_lowerCAmelCase )
return token_to_idx
def snake_case__ ( self : Union[str, Any] , _lowerCAmelCase : str , _lowerCAmelCase : Optional[str] = None ):
__snake_case : Optional[int] = 0
if os.path.isdir(_lowerCAmelCase ):
__snake_case : int = os.path.join(
_lowerCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
else:
__snake_case : Optional[Any] = (filename_prefix + """-""" if filename_prefix else """""") + save_directory
with open(_lowerCAmelCase , """w""" , encoding="""utf-8""" ) as writer:
            for token, token_index in sorted(self.vocab.items() , key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
f'''Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.'''
""" Please check that the vocabulary is not corrupted!""" )
__snake_case : Union[str, Any] = token_index
writer.write(token + """\n""" )
index += 1
__snake_case : List[Any] = os.path.join(_lowerCAmelCase , """sentencepiece.bpe.model""" )
with open(_lowerCAmelCase , """wb""" ) as fi:
__snake_case : List[Any] = self.sp_model.serialized_model_proto()
fi.write(_lowerCAmelCase )
return (vocab_file,)
| 20 | 0 |
import logging
import os
from typing import List, TextIO, Union
from conllu import parse_incr
from utils_ner import InputExample, Split, TokenClassificationTask
lowercase_ = logging.getLogger(__name__)
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase ):
def __init__( self : Union[str, Any] , _lowerCAmelCase : Optional[Any]=-1 ):
# in NER datasets, the last column is usually reserved for NER label
__snake_case : Tuple = label_idx
def snake_case__ ( self : Optional[Any] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Union[Split, str] ):
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
__snake_case : Optional[int] = mode.value
__snake_case : Optional[Any] = os.path.join(_lowerCAmelCase , f'''{mode}.txt''' )
__snake_case : int = 1
__snake_case : Optional[int] = []
with open(_lowerCAmelCase , encoding="""utf-8""" ) as f:
__snake_case : Union[str, Any] = []
__snake_case : int = []
for line in f:
if line.startswith("""-DOCSTART-""" ) or line == "" or line == "\n":
if words:
examples.append(InputExample(guid=f'''{mode}-{guid_index}''' , words=_lowerCAmelCase , labels=_lowerCAmelCase ) )
guid_index += 1
__snake_case : Optional[int] = []
__snake_case : Dict = []
else:
__snake_case : Any = line.split(""" """ )
words.append(splits[0] )
if len(_lowerCAmelCase ) > 1:
labels.append(splits[self.label_idx].replace("""\n""" , """""" ) )
else:
# Examples could have no label for mode = "test"
labels.append("""O""" )
if words:
examples.append(InputExample(guid=f'''{mode}-{guid_index}''' , words=_lowerCAmelCase , labels=_lowerCAmelCase ) )
return examples
def snake_case__ ( self : int , _lowerCAmelCase : TextIO , _lowerCAmelCase : TextIO , _lowerCAmelCase : List ):
__snake_case : List[str] = 0
for line in test_input_reader:
if line.startswith("""-DOCSTART-""" ) or line == "" or line == "\n":
writer.write(_lowerCAmelCase )
if not preds_list[example_id]:
example_id += 1
elif preds_list[example_id]:
__snake_case : List[Any] = line.split()[0] + """ """ + preds_list[example_id].pop(0 ) + """\n"""
writer.write(_lowerCAmelCase )
else:
logger.warning("""Maximum sequence length exceeded: No prediction for '%s'.""" , line.split()[0] )
def snake_case__ ( self : int , _lowerCAmelCase : str ):
if path:
with open(_lowerCAmelCase , """r""" ) as f:
__snake_case : Dict = f.read().splitlines()
if "O" not in labels:
__snake_case : str = ["""O"""] + labels
return labels
else:
return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase ):
def __init__( self : List[Any] ):
# in CONLL2003 dataset chunk column is second-to-last
super().__init__(label_idx=-2 )
def snake_case__ ( self : List[str] , _lowerCAmelCase : str ):
if path:
with open(_lowerCAmelCase , """r""" ) as f:
__snake_case : Tuple = f.read().splitlines()
if "O" not in labels:
__snake_case : int = ["""O"""] + labels
return labels
else:
return [
"O",
"B-ADVP",
"B-INTJ",
"B-LST",
"B-PRT",
"B-NP",
"B-SBAR",
"B-VP",
"B-ADJP",
"B-CONJP",
"B-PP",
"I-ADVP",
"I-INTJ",
"I-LST",
"I-PRT",
"I-NP",
"I-SBAR",
"I-VP",
"I-ADJP",
"I-CONJP",
"I-PP",
]
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase ):
def snake_case__ ( self : str , _lowerCAmelCase : List[str] , _lowerCAmelCase : Union[Split, str] ):
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
__snake_case : Optional[int] = mode.value
__snake_case : Any = os.path.join(_lowerCAmelCase , f'''{mode}.txt''' )
__snake_case : List[Any] = 1
__snake_case : Any = []
with open(_lowerCAmelCase , encoding="""utf-8""" ) as f:
for sentence in parse_incr(_lowerCAmelCase ):
__snake_case : int = []
__snake_case : str = []
for token in sentence:
words.append(token["""form"""] )
labels.append(token["""upos"""] )
assert len(_lowerCAmelCase ) == len(_lowerCAmelCase )
if words:
examples.append(InputExample(guid=f'''{mode}-{guid_index}''' , words=_lowerCAmelCase , labels=_lowerCAmelCase ) )
guid_index += 1
return examples
def snake_case__ ( self : Any , _lowerCAmelCase : TextIO , _lowerCAmelCase : TextIO , _lowerCAmelCase : List ):
__snake_case : List[Any] = 0
for sentence in parse_incr(_lowerCAmelCase ):
__snake_case : str = preds_list[example_id]
__snake_case : List[Any] = """"""
for token in sentence:
out += f'''{token['form']} ({token['upos']}|{s_p.pop(0 )}) '''
out += "\n"
writer.write(_lowerCAmelCase )
example_id += 1
def snake_case__ ( self : Optional[Any] , _lowerCAmelCase : str ):
if path:
with open(_lowerCAmelCase , """r""" ) as f:
return f.read().splitlines()
else:
return [
"ADJ",
"ADP",
"ADV",
"AUX",
"CCONJ",
"DET",
"INTJ",
"NOUN",
"NUM",
"PART",
"PRON",
"PROPN",
"PUNCT",
"SCONJ",
"SYM",
"VERB",
"X",
]
| 366 | from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"xlm-mlm-en-2048": "https://huggingface.co/xlm-mlm-en-2048/resolve/main/config.json",
"xlm-mlm-ende-1024": "https://huggingface.co/xlm-mlm-ende-1024/resolve/main/config.json",
"xlm-mlm-enfr-1024": "https://huggingface.co/xlm-mlm-enfr-1024/resolve/main/config.json",
"xlm-mlm-enro-1024": "https://huggingface.co/xlm-mlm-enro-1024/resolve/main/config.json",
"xlm-mlm-tlm-xnli15-1024": "https://huggingface.co/xlm-mlm-tlm-xnli15-1024/resolve/main/config.json",
"xlm-mlm-xnli15-1024": "https://huggingface.co/xlm-mlm-xnli15-1024/resolve/main/config.json",
"xlm-clm-enfr-1024": "https://huggingface.co/xlm-clm-enfr-1024/resolve/main/config.json",
"xlm-clm-ende-1024": "https://huggingface.co/xlm-clm-ende-1024/resolve/main/config.json",
"xlm-mlm-17-1280": "https://huggingface.co/xlm-mlm-17-1280/resolve/main/config.json",
"xlm-mlm-100-1280": "https://huggingface.co/xlm-mlm-100-1280/resolve/main/config.json",
}
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase ):
A : List[str] = "xlm"
A : List[str] = {
"hidden_size": "emb_dim",
"num_attention_heads": "n_heads",
"num_hidden_layers": "n_layers",
"n_words": "vocab_size", # For backward compatibility
}
def __init__( self : List[Any] , _lowerCAmelCase : Optional[Any]=3_01_45 , _lowerCAmelCase : Optional[Any]=20_48 , _lowerCAmelCase : Dict=12 , _lowerCAmelCase : int=16 , _lowerCAmelCase : Tuple=0.1 , _lowerCAmelCase : List[Any]=0.1 , _lowerCAmelCase : str=True , _lowerCAmelCase : List[str]=False , _lowerCAmelCase : Tuple=False , _lowerCAmelCase : Dict=False , _lowerCAmelCase : Dict=1 , _lowerCAmelCase : Tuple=True , _lowerCAmelCase : Optional[Any]=5_12 , _lowerCAmelCase : List[Any]=20_48**-0.5 , _lowerCAmelCase : List[str]=1e-12 , _lowerCAmelCase : List[Any]=0.02 , _lowerCAmelCase : List[str]=0 , _lowerCAmelCase : Optional[Any]=1 , _lowerCAmelCase : Dict=2 , _lowerCAmelCase : List[str]=3 , _lowerCAmelCase : Tuple=5 , _lowerCAmelCase : Optional[int]=True , _lowerCAmelCase : Tuple="first" , _lowerCAmelCase : List[Any]=True , _lowerCAmelCase : Dict=None , _lowerCAmelCase : List[Any]=True , _lowerCAmelCase : Any=0.1 , _lowerCAmelCase : Tuple=5 , _lowerCAmelCase : List[str]=5 , _lowerCAmelCase : Optional[Any]=0 , _lowerCAmelCase : Tuple=0 , _lowerCAmelCase : Union[str, Any]=2 , _lowerCAmelCase : Union[str, Any]=0 , **_lowerCAmelCase : Tuple , ):
__snake_case : Optional[Any] = vocab_size
__snake_case : Tuple = emb_dim
__snake_case : int = n_layers
__snake_case : List[str] = n_heads
__snake_case : Union[str, Any] = dropout
__snake_case : Optional[int] = attention_dropout
__snake_case : Optional[Any] = gelu_activation
__snake_case : Tuple = sinusoidal_embeddings
__snake_case : List[Any] = causal
__snake_case : Dict = asm
__snake_case : int = n_langs
__snake_case : str = use_lang_emb
__snake_case : Dict = layer_norm_eps
__snake_case : List[Any] = bos_index
__snake_case : Union[str, Any] = eos_index
__snake_case : Dict = pad_index
__snake_case : Any = unk_index
__snake_case : Dict = mask_index
__snake_case : Any = is_encoder
__snake_case : Dict = max_position_embeddings
__snake_case : Optional[Any] = embed_init_std
__snake_case : List[Any] = init_std
__snake_case : str = summary_type
__snake_case : Optional[Any] = summary_use_proj
__snake_case : str = summary_activation
__snake_case : Optional[int] = summary_proj_to_labels
__snake_case : Dict = summary_first_dropout
__snake_case : Dict = start_n_top
__snake_case : int = end_n_top
__snake_case : str = mask_token_id
__snake_case : int = lang_id
if "n_words" in kwargs:
__snake_case : Dict = kwargs["""n_words"""]
super().__init__(pad_token_id=_lowerCAmelCase , bos_token_id=_lowerCAmelCase , **_lowerCAmelCase )
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase ):
@property
def snake_case__ ( self : Dict ):
if self.task == "multiple-choice":
__snake_case : Optional[int] = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
__snake_case : Tuple = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
("""token_type_ids""", dynamic_axis),
] )
| 20 | 0 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer
lowercase_ = logging.get_logger(__name__)
lowercase_ = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
lowercase_ = {
"vocab_file": {
"distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt",
"distilbert-base-uncased-distilled-squad": (
"https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt"
),
"distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt",
"distilbert-base-cased-distilled-squad": (
"https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt"
),
"distilbert-base-german-cased": "https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt",
"distilbert-base-multilingual-cased": (
"https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json",
"distilbert-base-uncased-distilled-squad": (
"https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json"
),
"distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json",
"distilbert-base-cased-distilled-squad": (
"https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json"
),
"distilbert-base-german-cased": (
"https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json"
),
"distilbert-base-multilingual-cased": (
"https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json"
),
},
}
lowercase_ = {
"distilbert-base-uncased": 5_12,
"distilbert-base-uncased-distilled-squad": 5_12,
"distilbert-base-cased": 5_12,
"distilbert-base-cased-distilled-squad": 5_12,
"distilbert-base-german-cased": 5_12,
"distilbert-base-multilingual-cased": 5_12,
}
lowercase_ = {
"distilbert-base-uncased": {"do_lower_case": True},
"distilbert-base-uncased-distilled-squad": {"do_lower_case": True},
"distilbert-base-cased": {"do_lower_case": False},
"distilbert-base-cased-distilled-squad": {"do_lower_case": False},
"distilbert-base-german-cased": {"do_lower_case": False},
"distilbert-base-multilingual-cased": {"do_lower_case": False},
}
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase ):
A : int = VOCAB_FILES_NAMES
A : Dict = PRETRAINED_VOCAB_FILES_MAP
A : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A : Union[str, Any] = PRETRAINED_INIT_CONFIGURATION
A : List[Any] = ["input_ids", "attention_mask"]
A : Optional[int] = DistilBertTokenizer
def __init__( self : Optional[int] , _lowerCAmelCase : Optional[Any]=None , _lowerCAmelCase : str=None , _lowerCAmelCase : Any=True , _lowerCAmelCase : Optional[Any]="[UNK]" , _lowerCAmelCase : Tuple="[SEP]" , _lowerCAmelCase : Any="[PAD]" , _lowerCAmelCase : int="[CLS]" , _lowerCAmelCase : Dict="[MASK]" , _lowerCAmelCase : str=True , _lowerCAmelCase : str=None , **_lowerCAmelCase : Dict , ):
super().__init__(
_lowerCAmelCase , tokenizer_file=_lowerCAmelCase , do_lower_case=_lowerCAmelCase , unk_token=_lowerCAmelCase , sep_token=_lowerCAmelCase , pad_token=_lowerCAmelCase , cls_token=_lowerCAmelCase , mask_token=_lowerCAmelCase , tokenize_chinese_chars=_lowerCAmelCase , strip_accents=_lowerCAmelCase , **_lowerCAmelCase , )
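        # keep the backend tokenizer's normalizer in sync with the casing options passed at init time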
__snake_case : Optional[Any] = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("""lowercase""" , _lowerCAmelCase ) != do_lower_case
or normalizer_state.get("""strip_accents""" , _lowerCAmelCase ) != strip_accents
or normalizer_state.get("""handle_chinese_chars""" , _lowerCAmelCase ) != tokenize_chinese_chars
):
__snake_case : List[Any] = getattr(_lowerCAmelCase , normalizer_state.pop("""type""" ) )
__snake_case : Union[str, Any] = do_lower_case
__snake_case : Optional[int] = strip_accents
__snake_case : Union[str, Any] = tokenize_chinese_chars
__snake_case : Tuple = normalizer_class(**_lowerCAmelCase )
__snake_case : Optional[int] = do_lower_case
def snake_case__ ( self : str , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Tuple=None ):
__snake_case : int = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def snake_case__ ( self : Tuple , _lowerCAmelCase : List[int] , _lowerCAmelCase : Optional[List[int]] = None ):
__snake_case : List[Any] = [self.sep_token_id]
__snake_case : Any = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def snake_case__ ( self : List[Any] , _lowerCAmelCase : str , _lowerCAmelCase : Optional[str] = None ):
__snake_case : int = self._tokenizer.model.save(_lowerCAmelCase , name=_lowerCAmelCase )
return tuple(_lowerCAmelCase )
| 367 | import math
from typing import Optional
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"facebook/encodec_24khz": "https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json",
"facebook/encodec_48khz": "https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json",
}
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase ):
A : List[Any] = "encodec"
def __init__( self : Tuple , _lowerCAmelCase : Union[str, Any]=[1.5, 3.0, 6.0, 12.0, 24.0] , _lowerCAmelCase : Tuple=2_40_00 , _lowerCAmelCase : List[Any]=1 , _lowerCAmelCase : Optional[int]=False , _lowerCAmelCase : Optional[Any]=None , _lowerCAmelCase : Union[str, Any]=None , _lowerCAmelCase : int=1_28 , _lowerCAmelCase : List[Any]=32 , _lowerCAmelCase : Optional[Any]=1 , _lowerCAmelCase : Union[str, Any]=[8, 5, 4, 2] , _lowerCAmelCase : str="weight_norm" , _lowerCAmelCase : Tuple=7 , _lowerCAmelCase : str=7 , _lowerCAmelCase : Any=3 , _lowerCAmelCase : int=2 , _lowerCAmelCase : str=True , _lowerCAmelCase : Dict="reflect" , _lowerCAmelCase : Tuple=2 , _lowerCAmelCase : List[Any]=2 , _lowerCAmelCase : int=1.0 , _lowerCAmelCase : Optional[int]=10_24 , _lowerCAmelCase : int=None , _lowerCAmelCase : List[str]=True , **_lowerCAmelCase : List[Any] , ):
__snake_case : Optional[int] = target_bandwidths
__snake_case : int = sampling_rate
__snake_case : List[Any] = audio_channels
__snake_case : str = normalize
__snake_case : Union[str, Any] = chunk_length_s
__snake_case : Union[str, Any] = overlap
__snake_case : Union[str, Any] = hidden_size
__snake_case : Union[str, Any] = num_filters
__snake_case : Optional[Any] = num_residual_layers
__snake_case : List[Any] = upsampling_ratios
__snake_case : List[str] = norm_type
__snake_case : Union[str, Any] = kernel_size
__snake_case : Optional[int] = last_kernel_size
__snake_case : Optional[Any] = residual_kernel_size
__snake_case : Dict = dilation_growth_rate
__snake_case : int = use_causal_conv
__snake_case : Tuple = pad_mode
__snake_case : str = compress
__snake_case : Optional[Any] = num_lstm_layers
__snake_case : List[Any] = trim_right_ratio
__snake_case : Any = codebook_size
__snake_case : int = codebook_dim if codebook_dim is not None else hidden_size
__snake_case : int = use_conv_shortcut
if self.norm_type not in ["weight_norm", "time_group_norm"]:
raise ValueError(
                f'''self.norm_type must be one of `"weight_norm"`, `"time_group_norm"`, got {self.norm_type}''' )
super().__init__(**_lowerCAmelCase )
@property
def snake_case__ ( self : int ):
if self.chunk_length_s is None:
return None
else:
return int(self.chunk_length_s * self.sampling_rate )
@property
def snake_case__ ( self : int ):
if self.chunk_length_s is None or self.overlap is None:
return None
else:
return max(1 , int((1.0 - self.overlap) * self.chunk_length ) )
@property
def snake_case__ ( self : Union[str, Any] ):
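        # the overall hop length is the product of the upsampling ratios, so frame_rate = ceil(sampling_rate / hop_length)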
__snake_case : List[str] = np.prod(self.upsampling_ratios )
return math.ceil(self.sampling_rate / hop_length )
@property
def snake_case__ ( self : Tuple ):
return int(10_00 * self.target_bandwidths[-1] // (self.frame_rate * 10) )
| 20 | 0 |
from __future__ import annotations
lowercase_ = 8.9_8_8E9 # units = N * m^2 * C^-2
def __lowerCAmelCase ( __SCREAMING_SNAKE_CASE : float , __SCREAMING_SNAKE_CASE : float , __SCREAMING_SNAKE_CASE : float , __SCREAMING_SNAKE_CASE : float ):
    '''Solve Coulomb's law, F = k * q1 * q2 / d**2, for whichever of force, charge1, charge2 or distance is passed as 0.'''
    __snake_case : Optional[Any] = abs(chargea * chargeb )
    if (force, chargea, chargeb, distance).count(0 ) != 1:
raise ValueError("""One and only one argument must be 0""" )
if distance < 0:
raise ValueError("""Distance cannot be negative""" )
if force == 0:
__snake_case : Optional[Any] = COULOMBS_CONSTANT * charge_product / (distance**2)
return {"force": force}
    elif chargea == 0:
        __snake_case : Any = abs(__SCREAMING_SNAKE_CASE ) * (distance**2) / (COULOMBS_CONSTANT * chargeb)
        return {"charge1": chargea}
    elif chargeb == 0:
        __snake_case : List[str] = abs(__SCREAMING_SNAKE_CASE ) * (distance**2) / (COULOMBS_CONSTANT * chargea)
        return {"charge2": chargeb}
elif distance == 0:
__snake_case : Optional[Any] = (COULOMBS_CONSTANT * charge_product / abs(__SCREAMING_SNAKE_CASE )) ** 0.5
return {"distance": distance}
raise ValueError("""Exactly one argument must be 0""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 368 | from __future__ import annotations
def __lowerCAmelCase ( __SCREAMING_SNAKE_CASE : list , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : int ):
    '''Merge the sorted runs input_list[low:mid] and input_list[mid:high + 1] back into input_list.'''
__snake_case : str = []
__snake_case , __snake_case : List[str] = input_list[low:mid], input_list[mid : high + 1]
while left and right:
result.append((left if left[0] <= right[0] else right).pop(0 ) )
__snake_case : List[Any] = result + left + right
return input_list
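# Bottom-up merge sort: treat the list as sorted runs of width p (starting at 2)
# and merge neighbouring runs, doubling p each pass until a single run remains.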
def __lowerCAmelCase ( __SCREAMING_SNAKE_CASE : list ):
    '''Sort input_list using bottom-up (iterative) merge sort.'''
if len(__SCREAMING_SNAKE_CASE ) <= 1:
return input_list
__snake_case : Union[str, Any] = list(__SCREAMING_SNAKE_CASE )
# iteration for two-way merging
__snake_case : Tuple = 2
while p <= len(__SCREAMING_SNAKE_CASE ):
# getting low, high and middle value for merge-sort of single list
for i in range(0 , len(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE ):
__snake_case : List[str] = i
__snake_case : str = i + p - 1
__snake_case : Optional[Any] = (low + high + 1) // 2
__snake_case : str = merge(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# final merge of last two parts
if p * 2 >= len(__SCREAMING_SNAKE_CASE ):
__snake_case : List[str] = i
__snake_case : str = merge(__SCREAMING_SNAKE_CASE , 0 , __SCREAMING_SNAKE_CASE , len(__SCREAMING_SNAKE_CASE ) - 1 )
break
p *= 2
return input_list
if __name__ == "__main__":
lowercase_ = input("Enter numbers separated by a comma:\n").strip()
if user_input == "":
lowercase_ = []
else:
lowercase_ = [int(item.strip()) for item in user_input.split(",")]
print(iter_merge_sort(unsorted))
| 20 | 0 |
import dataclasses
import json
import warnings
from dataclasses import dataclass, field
from time import time
from typing import List
from ..utils import logging
lowercase_ = logging.get_logger(__name__)
def __lowerCAmelCase ( __SCREAMING_SNAKE_CASE : Optional[Any]=None , __SCREAMING_SNAKE_CASE : List[str]=None ):
    '''Build a dataclasses field whose list default is supplied via a default_factory.'''
return field(default_factory=lambda: default , metadata=__SCREAMING_SNAKE_CASE )
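# dataclasses reject mutable default values, so list defaults are wrapped in a default_factory above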
@dataclass
class SCREAMING_SNAKE_CASE__ :
A : List[str] = list_field(
default=[] , metadata={
"help": (
"Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version"
" of all available models"
)
} , )
A : List[int] = list_field(
default=[8] , metadata={"help": "List of batch sizes for which memory and time performance will be evaluated"} )
A : List[int] = list_field(
default=[8, 32, 128, 512] , metadata={"help": "List of sequence lengths for which memory and time performance will be evaluated"} , )
A : bool = field(
default=__UpperCamelCase , metadata={"help": "Whether to benchmark inference of model. Inference can be disabled via --no-inference."} , )
A : bool = field(
default=__UpperCamelCase , metadata={"help": "Whether to run on available cuda devices. Cuda can be disabled via --no-cuda."} , )
A : bool = field(
default=__UpperCamelCase , metadata={"help": "Whether to run on available tpu devices. TPU can be disabled via --no-tpu."} )
A : bool = field(default=__UpperCamelCase , metadata={"help": "Use FP16 to accelerate inference."} )
A : bool = field(default=__UpperCamelCase , metadata={"help": "Benchmark training of model"} )
A : bool = field(default=__UpperCamelCase , metadata={"help": "Verbose memory tracing"} )
A : bool = field(
default=__UpperCamelCase , metadata={"help": "Whether to perform speed measurements. Speed measurements can be disabled via --no-speed."} , )
A : bool = field(
default=__UpperCamelCase , metadata={
"help": "Whether to perform memory measurements. Memory measurements can be disabled via --no-memory"
} , )
A : bool = field(default=__UpperCamelCase , metadata={"help": "Trace memory line by line"} )
A : bool = field(default=__UpperCamelCase , metadata={"help": "Save result to a CSV file"} )
A : bool = field(default=__UpperCamelCase , metadata={"help": "Save all print statements in a log file"} )
A : bool = field(default=__UpperCamelCase , metadata={"help": "Whether to print environment information"} )
A : bool = field(
default=__UpperCamelCase , metadata={
"help": (
"Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use"
" multiprocessing for accurate CPU and GPU memory measurements. This option should only be disabled"
" for debugging / testing and on TPU."
)
} , )
A : str = field(
default=f'''inference_time_{round(time() )}.csv''' , metadata={"help": "CSV filename used if saving time results to csv."} , )
A : str = field(
default=f'''inference_memory_{round(time() )}.csv''' , metadata={"help": "CSV filename used if saving memory results to csv."} , )
A : str = field(
default=f'''train_time_{round(time() )}.csv''' , metadata={"help": "CSV filename used if saving time results to csv for training."} , )
A : str = field(
default=f'''train_memory_{round(time() )}.csv''' , metadata={"help": "CSV filename used if saving memory results to csv for training."} , )
A : str = field(
default=f'''env_info_{round(time() )}.csv''' , metadata={"help": "CSV filename used if saving environment information."} , )
A : str = field(
default=f'''log_{round(time() )}.csv''' , metadata={"help": "Log filename used if print statements are saved in log."} , )
A : int = field(default=3 , metadata={"help": "Times an experiment will be run."} )
A : bool = field(
default=__UpperCamelCase , metadata={
"help": (
"Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain"
" model weights."
)
} , )
def snake_case__ ( self : Dict ):
warnings.warn(
f'''The class {self.__class__} is deprecated. Hugging Face Benchmarking utils'''
""" are deprecated in general and it is advised to use external Benchmarking libraries """
""" to benchmark Transformer models.""" , _lowerCAmelCase , )
def snake_case__ ( self : int ):
return json.dumps(dataclasses.asdict(self ) , indent=2 )
@property
def snake_case__ ( self : List[Any] ):
if len(self.models ) <= 0:
raise ValueError(
"""Please make sure you provide at least one model name / model identifier, *e.g.* `--models"""
""" bert-base-cased` or `args.models = ['bert-base-cased'].""" )
return self.models
@property
def snake_case__ ( self : Tuple ):
if not self.multi_process:
return False
elif self.is_tpu:
logger.info("""Multiprocessing is currently not possible on TPU.""" )
return False
else:
return True
| 369 | import random
import sys
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.colors import ListedColormap
lowercase_ = "Usage of script: script_name <size_of_canvas:int>"
lowercase_ = [0] * 1_00 + [1] * 10
random.shuffle(choice)
def __lowerCAmelCase ( __SCREAMING_SNAKE_CASE : int ):
    '''Create a size x size canvas with every cell initially dead (False).'''
__snake_case : List[str] = [[False for i in range(__SCREAMING_SNAKE_CASE )] for j in range(__SCREAMING_SNAKE_CASE )]
return canvas
def __lowerCAmelCase ( __SCREAMING_SNAKE_CASE : list[list[bool]] ):
    '''Randomly seed the canvas in place with live and dead cells.'''
for i, row in enumerate(__SCREAMING_SNAKE_CASE ):
for j, _ in enumerate(__SCREAMING_SNAKE_CASE ):
__snake_case : int = bool(random.getrandbits(1 ) )
def __lowerCAmelCase ( __SCREAMING_SNAKE_CASE : list[list[bool]] ):
    '''Advance the whole canvas by one generation and return the new state.'''
__snake_case : Union[str, Any] = np.array(__SCREAMING_SNAKE_CASE )
__snake_case : List[Any] = np.array(create_canvas(current_canvas.shape[0] ) )
for r, row in enumerate(__SCREAMING_SNAKE_CASE ):
for c, pt in enumerate(__SCREAMING_SNAKE_CASE ):
__snake_case : Optional[Any] = __judge_point(
__SCREAMING_SNAKE_CASE , current_canvas[r - 1 : r + 2, c - 1 : c + 2] )
__snake_case : List[str] = next_gen_canvas
del next_gen_canvas # cleaning memory as we move on.
__snake_case : list[list[bool]] = current_canvas.tolist()
return return_canvas
def __lowerCAmelCase ( __SCREAMING_SNAKE_CASE : bool , __SCREAMING_SNAKE_CASE : list[list[bool]] ):
    '''Compute the next state of the focus cell from its current state and its 3x3 neighbourhood.'''
__snake_case : Any = 0
__snake_case : Dict = 0
# finding dead or alive neighbours count.
for i in neighbours:
for status in i:
if status:
alive += 1
else:
dead += 1
# handling duplicate entry for focus pt.
if pt:
alive -= 1
else:
dead -= 1
# running the rules of game here.
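    # Conway's rules (B3/S23): a live cell survives with two or three live
    # neighbours; a dead cell becomes alive with exactly three live neighbours.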
__snake_case : str = pt
if pt:
if alive < 2:
__snake_case : Optional[Any] = False
elif alive == 2 or alive == 3:
__snake_case : Union[str, Any] = True
elif alive > 3:
__snake_case : Optional[int] = False
else:
if alive == 3:
__snake_case : List[Any] = True
return state
if __name__ == "__main__":
if len(sys.argv) != 2:
raise Exception(usage_doc)
lowercase_ = int(sys.argv[1])
# main working structure of this module.
lowercase_ = create_canvas(canvas_size)
seed(c)
lowercase_ , lowercase_ = plt.subplots()
fig.show()
lowercase_ = ListedColormap(["w", "k"])
try:
while True:
lowercase_ = run(c)
ax.matshow(c, cmap=cmap)
fig.canvas.draw()
ax.cla()
except KeyboardInterrupt:
# do nothing.
pass
| 20 | 0 |
def __lowerCAmelCase ( __SCREAMING_SNAKE_CASE : int ):
    '''Return the length of the smallest repunit divisible by divisor, or 0 when divisor shares a factor with 10.'''
if divisor % 5 == 0 or divisor % 2 == 0:
return 0
__snake_case : Any = 1
__snake_case : List[Any] = 1
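    # repunits satisfy R(k + 1) = 10 * R(k) + 1, so only the value modulo divisor needs tracking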
while repunit:
__snake_case : List[str] = (1_0 * repunit + 1) % divisor
repunit_index += 1
return repunit_index
def __lowerCAmelCase ( __SCREAMING_SNAKE_CASE : int = 1_0_0_0_0_0_0 ):
    '''Return the smallest divisor coprime to 10 whose least divisible repunit is longer than limit.'''
__snake_case : Dict = limit - 1
if divisor % 2 == 0:
divisor += 1
while least_divisible_repunit(__SCREAMING_SNAKE_CASE ) <= limit:
divisor += 2
return divisor
if __name__ == "__main__":
print(F'''{solution() = }''')
| 370 | import inspect
import warnings
from typing import Any, Dict, Optional, Union
from packaging import version
def __lowerCAmelCase ( *__SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Optional[Union[Dict, Any]] = None , __SCREAMING_SNAKE_CASE : Any=True , __SCREAMING_SNAKE_CASE : int=2 ):
    '''Issue deprecation warnings for the given (attribute, version, message) tuples and return the deprecated values.'''
from .. import __version__
__snake_case : List[Any] = take_from
__snake_case : List[Any] = ()
if not isinstance(args[0] , __SCREAMING_SNAKE_CASE ):
__snake_case : str = (args,)
for attribute, version_name, message in args:
if version.parse(version.parse(__SCREAMING_SNAKE_CASE ).base_version ) >= version.parse(__SCREAMING_SNAKE_CASE ):
raise ValueError(
F'''The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers\''''
F''' version {__version__} is >= {version_name}''' )
__snake_case : Optional[Any] = None
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) and attribute in deprecated_kwargs:
values += (deprecated_kwargs.pop(__SCREAMING_SNAKE_CASE ),)
__snake_case : Optional[Any] = F'''The `{attribute}` argument is deprecated and will be removed in version {version_name}.'''
elif hasattr(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
values += (getattr(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ),)
__snake_case : Any = F'''The `{attribute}` attribute is deprecated and will be removed in version {version_name}.'''
elif deprecated_kwargs is None:
__snake_case : Tuple = F'''`{attribute}` is deprecated and will be removed in version {version_name}.'''
if warning is not None:
__snake_case : Optional[Any] = warning + """ """ if standard_warn else """"""
warnings.warn(warning + message , __SCREAMING_SNAKE_CASE , stacklevel=__SCREAMING_SNAKE_CASE )
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) and len(__SCREAMING_SNAKE_CASE ) > 0:
__snake_case : Dict = inspect.getouterframes(inspect.currentframe() )[1]
__snake_case : int = call_frame.filename
__snake_case : int = call_frame.lineno
__snake_case : List[str] = call_frame.function
__snake_case , __snake_case : List[Any] = next(iter(deprecated_kwargs.items() ) )
raise TypeError(F'''{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`''' )
if len(__SCREAMING_SNAKE_CASE ) == 0:
return
elif len(__SCREAMING_SNAKE_CASE ) == 1:
return values[0]
return values
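# Hypothetical usage sketch (argument names are illustrative only):
# value = deprecate(("old_arg", "0.30.0", "use `new_arg` instead"), take_from=kwargs)
# pops `old_arg` from kwargs, emits a deprecation warning and returns the deprecated value.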
| 20 | 0 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"xlm-mlm-en-2048": "https://huggingface.co/xlm-mlm-en-2048/resolve/main/config.json",
"xlm-mlm-ende-1024": "https://huggingface.co/xlm-mlm-ende-1024/resolve/main/config.json",
"xlm-mlm-enfr-1024": "https://huggingface.co/xlm-mlm-enfr-1024/resolve/main/config.json",
"xlm-mlm-enro-1024": "https://huggingface.co/xlm-mlm-enro-1024/resolve/main/config.json",
"xlm-mlm-tlm-xnli15-1024": "https://huggingface.co/xlm-mlm-tlm-xnli15-1024/resolve/main/config.json",
"xlm-mlm-xnli15-1024": "https://huggingface.co/xlm-mlm-xnli15-1024/resolve/main/config.json",
"xlm-clm-enfr-1024": "https://huggingface.co/xlm-clm-enfr-1024/resolve/main/config.json",
"xlm-clm-ende-1024": "https://huggingface.co/xlm-clm-ende-1024/resolve/main/config.json",
"xlm-mlm-17-1280": "https://huggingface.co/xlm-mlm-17-1280/resolve/main/config.json",
"xlm-mlm-100-1280": "https://huggingface.co/xlm-mlm-100-1280/resolve/main/config.json",
}
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase ):
A : List[str] = "xlm"
A : List[str] = {
"hidden_size": "emb_dim",
"num_attention_heads": "n_heads",
"num_hidden_layers": "n_layers",
"n_words": "vocab_size", # For backward compatibility
}
def __init__( self : List[Any] , _lowerCAmelCase : Optional[Any]=3_01_45 , _lowerCAmelCase : Optional[Any]=20_48 , _lowerCAmelCase : Dict=12 , _lowerCAmelCase : int=16 , _lowerCAmelCase : Tuple=0.1 , _lowerCAmelCase : List[Any]=0.1 , _lowerCAmelCase : str=True , _lowerCAmelCase : List[str]=False , _lowerCAmelCase : Tuple=False , _lowerCAmelCase : Dict=False , _lowerCAmelCase : Dict=1 , _lowerCAmelCase : Tuple=True , _lowerCAmelCase : Optional[Any]=5_12 , _lowerCAmelCase : List[Any]=20_48**-0.5 , _lowerCAmelCase : List[str]=1e-12 , _lowerCAmelCase : List[Any]=0.02 , _lowerCAmelCase : List[str]=0 , _lowerCAmelCase : Optional[Any]=1 , _lowerCAmelCase : Dict=2 , _lowerCAmelCase : List[str]=3 , _lowerCAmelCase : Tuple=5 , _lowerCAmelCase : Optional[int]=True , _lowerCAmelCase : Tuple="first" , _lowerCAmelCase : List[Any]=True , _lowerCAmelCase : Dict=None , _lowerCAmelCase : List[Any]=True , _lowerCAmelCase : Any=0.1 , _lowerCAmelCase : Tuple=5 , _lowerCAmelCase : List[str]=5 , _lowerCAmelCase : Optional[Any]=0 , _lowerCAmelCase : Tuple=0 , _lowerCAmelCase : Union[str, Any]=2 , _lowerCAmelCase : Union[str, Any]=0 , **_lowerCAmelCase : Tuple , ):
__snake_case : Optional[Any] = vocab_size
__snake_case : Tuple = emb_dim
__snake_case : int = n_layers
__snake_case : List[str] = n_heads
__snake_case : Union[str, Any] = dropout
__snake_case : Optional[int] = attention_dropout
__snake_case : Optional[Any] = gelu_activation
__snake_case : Tuple = sinusoidal_embeddings
__snake_case : List[Any] = causal
__snake_case : Dict = asm
__snake_case : int = n_langs
__snake_case : str = use_lang_emb
__snake_case : Dict = layer_norm_eps
__snake_case : List[Any] = bos_index
__snake_case : Union[str, Any] = eos_index
__snake_case : Dict = pad_index
__snake_case : Any = unk_index
__snake_case : Dict = mask_index
__snake_case : Any = is_encoder
__snake_case : Dict = max_position_embeddings
__snake_case : Optional[Any] = embed_init_std
__snake_case : List[Any] = init_std
__snake_case : str = summary_type
__snake_case : Optional[Any] = summary_use_proj
__snake_case : str = summary_activation
__snake_case : Optional[int] = summary_proj_to_labels
__snake_case : Dict = summary_first_dropout
__snake_case : Dict = start_n_top
__snake_case : int = end_n_top
__snake_case : str = mask_token_id
__snake_case : int = lang_id
if "n_words" in kwargs:
__snake_case : Dict = kwargs["""n_words"""]
super().__init__(pad_token_id=_lowerCAmelCase , bos_token_id=_lowerCAmelCase , **_lowerCAmelCase )
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase ):
@property
def snake_case__ ( self : Dict ):
if self.task == "multiple-choice":
__snake_case : Optional[int] = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
__snake_case : Tuple = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
("""token_type_ids""", dynamic_axis),
] )
| 371 | import argparse
import os
from . import (
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
AlbertConfig,
BartConfig,
BertConfig,
CamembertConfig,
CTRLConfig,
DistilBertConfig,
DPRConfig,
ElectraConfig,
FlaubertConfig,
GPTaConfig,
LayoutLMConfig,
LxmertConfig,
OpenAIGPTConfig,
RobertaConfig,
TaConfig,
TFAlbertForPreTraining,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFCamembertForMaskedLM,
TFCTRLLMHeadModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
TFElectraForPreTraining,
TFFlaubertWithLMHeadModel,
TFGPTaLMHeadModel,
TFLayoutLMForMaskedLM,
TFLxmertForPreTraining,
TFLxmertVisualFeatureEncoder,
TFOpenAIGPTLMHeadModel,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForSequenceClassification,
TFTaForConditionalGeneration,
TFTransfoXLLMHeadModel,
TFWavaVecaModel,
TFXLMRobertaForMaskedLM,
TFXLMWithLMHeadModel,
TFXLNetLMHeadModel,
TransfoXLConfig,
WavaVecaConfig,
WavaVecaModel,
XLMConfig,
XLMRobertaConfig,
XLNetConfig,
is_torch_available,
load_pytorch_checkpoint_in_tfa_model,
)
from .utils import CONFIG_NAME, WEIGHTS_NAME, cached_file, logging
if is_torch_available():
import numpy as np
import torch
from . import (
AlbertForPreTraining,
BartForConditionalGeneration,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
CamembertForMaskedLM,
CTRLLMHeadModel,
DistilBertForMaskedLM,
DistilBertForQuestionAnswering,
DPRContextEncoder,
DPRQuestionEncoder,
DPRReader,
ElectraForPreTraining,
FlaubertWithLMHeadModel,
GPTaLMHeadModel,
LayoutLMForMaskedLM,
LxmertForPreTraining,
LxmertVisualFeatureEncoder,
OpenAIGPTLMHeadModel,
RobertaForMaskedLM,
RobertaForSequenceClassification,
TaForConditionalGeneration,
TransfoXLLMHeadModel,
XLMRobertaForMaskedLM,
XLMWithLMHeadModel,
XLNetLMHeadModel,
)
logging.set_verbosity_info()
lowercase_ = {
"bart": (
BartConfig,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
BartForConditionalGeneration,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
),
"bert": (
BertConfig,
TFBertForPreTraining,
BertForPreTraining,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"bert-large-uncased-whole-word-masking-finetuned-squad": (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"bert-large-cased-whole-word-masking-finetuned-squad": (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"bert-base-cased-finetuned-mrpc": (
BertConfig,
TFBertForSequenceClassification,
BertForSequenceClassification,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"dpr": (
DPRConfig,
TFDPRQuestionEncoder,
TFDPRContextEncoder,
TFDPRReader,
DPRQuestionEncoder,
DPRContextEncoder,
DPRReader,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
),
"gpt2": (
GPTaConfig,
TFGPTaLMHeadModel,
GPTaLMHeadModel,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"xlnet": (
XLNetConfig,
TFXLNetLMHeadModel,
XLNetLMHeadModel,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"xlm": (
XLMConfig,
TFXLMWithLMHeadModel,
XLMWithLMHeadModel,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"xlm-roberta": (
XLMRobertaConfig,
TFXLMRobertaForMaskedLM,
XLMRobertaForMaskedLM,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"transfo-xl": (
TransfoXLConfig,
TFTransfoXLLMHeadModel,
TransfoXLLMHeadModel,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"openai-gpt": (
OpenAIGPTConfig,
TFOpenAIGPTLMHeadModel,
OpenAIGPTLMHeadModel,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"roberta": (
RobertaConfig,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
RobertaForMaskedLM,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"layoutlm": (
LayoutLMConfig,
TFLayoutLMForMaskedLM,
LayoutLMForMaskedLM,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
),
"roberta-large-mnli": (
RobertaConfig,
TFRobertaForSequenceClassification,
RobertaForSequenceClassification,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"camembert": (
CamembertConfig,
TFCamembertForMaskedLM,
CamembertForMaskedLM,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"flaubert": (
FlaubertConfig,
TFFlaubertWithLMHeadModel,
FlaubertWithLMHeadModel,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"distilbert": (
DistilBertConfig,
TFDistilBertForMaskedLM,
DistilBertForMaskedLM,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"distilbert-base-distilled-squad": (
DistilBertConfig,
TFDistilBertForQuestionAnswering,
DistilBertForQuestionAnswering,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"lxmert": (
LxmertConfig,
TFLxmertForPreTraining,
LxmertForPreTraining,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"lxmert-visual-feature-encoder": (
LxmertConfig,
TFLxmertVisualFeatureEncoder,
LxmertVisualFeatureEncoder,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"ctrl": (
CTRLConfig,
TFCTRLLMHeadModel,
CTRLLMHeadModel,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"albert": (
AlbertConfig,
TFAlbertForPreTraining,
AlbertForPreTraining,
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"t5": (
TaConfig,
TFTaForConditionalGeneration,
TaForConditionalGeneration,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"electra": (
ElectraConfig,
TFElectraForPreTraining,
ElectraForPreTraining,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"wav2vec2": (
WavaVecaConfig,
TFWavaVecaModel,
WavaVecaModel,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
}
def __lowerCAmelCase ( __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : List[str]=False , __SCREAMING_SNAKE_CASE : List[Any]=True ):
    '''Build the TF model from its configuration, load the PyTorch checkpoint into it, optionally compare the two models' outputs, and save the TF weights.'''
if model_type not in MODEL_CLASSES:
raise ValueError(F'''Unrecognized model type, should be one of {list(MODEL_CLASSES.keys() )}.''' )
__snake_case , __snake_case , __snake_case , __snake_case : Any = MODEL_CLASSES[model_type]
# Initialise TF model
if config_file in aws_config_map:
__snake_case : int = cached_file(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , force_download=not use_cached_models )
__snake_case : Dict = config_class.from_json_file(__SCREAMING_SNAKE_CASE )
__snake_case : Tuple = True
__snake_case : Union[str, Any] = True
print(F'''Building TensorFlow model from configuration: {config}''' )
__snake_case : List[Any] = model_class(__SCREAMING_SNAKE_CASE )
# Load weights from tf checkpoint
if pytorch_checkpoint_path in aws_config_map.keys():
__snake_case : Optional[Any] = cached_file(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , force_download=not use_cached_models )
# Load PyTorch checkpoint in tf2 model:
__snake_case : List[Any] = load_pytorch_checkpoint_in_tfa_model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
if compare_with_pt_model:
__snake_case : Tuple = tf_model(tf_model.dummy_inputs , training=__SCREAMING_SNAKE_CASE ) # build the network
__snake_case : List[str] = torch.load(__SCREAMING_SNAKE_CASE , map_location="""cpu""" )
__snake_case : Any = pt_model_class.from_pretrained(
pretrained_model_name_or_path=__SCREAMING_SNAKE_CASE , config=__SCREAMING_SNAKE_CASE , state_dict=__SCREAMING_SNAKE_CASE )
with torch.no_grad():
__snake_case : Union[str, Any] = pt_model(**pt_model.dummy_inputs )
__snake_case : Any = pto[0].numpy()
__snake_case : Optional[int] = tfo[0].numpy()
__snake_case : Optional[int] = np.amax(np.abs(np_pt - np_tf ) )
print(F'''Max absolute difference between models outputs {diff}''' )
assert diff <= 2E-2, F'''Error, model absolute difference is >2e-2: {diff}'''
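        # the 2e-2 tolerance absorbs small numerical differences between the PyTorch and TensorFlow graphs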
# Save pytorch-model
print(F'''Save TensorFlow model to {tf_dump_path}''' )
tf_model.save_weights(__SCREAMING_SNAKE_CASE , save_format="""h5""" )
def __lowerCAmelCase ( __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : List[str]=None , __SCREAMING_SNAKE_CASE : Any=None , __SCREAMING_SNAKE_CASE : Tuple=False , __SCREAMING_SNAKE_CASE : Tuple=False , __SCREAMING_SNAKE_CASE : List[Any]=False , __SCREAMING_SNAKE_CASE : Any=False , ):
    '''Convert the requested PyTorch checkpoints (all known ones by default) to TensorFlow.'''
if args_model_type is None:
__snake_case : Tuple = list(MODEL_CLASSES.keys() )
else:
__snake_case : Union[str, Any] = [args_model_type]
for j, model_type in enumerate(__SCREAMING_SNAKE_CASE , start=1 ):
print("""=""" * 1_0_0 )
print(F''' Converting model type {j}/{len(__SCREAMING_SNAKE_CASE )}: {model_type}''' )
print("""=""" * 1_0_0 )
if model_type not in MODEL_CLASSES:
raise ValueError(F'''Unrecognized model type {model_type}, should be one of {list(MODEL_CLASSES.keys() )}.''' )
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case : Optional[int] = MODEL_CLASSES[model_type]
if model_shortcut_names_or_path is None:
__snake_case : int = list(aws_model_maps.keys() )
if config_shortcut_names_or_path is None:
__snake_case : Union[str, Any] = model_shortcut_names_or_path
for i, (model_shortcut_name, config_shortcut_name) in enumerate(
zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) , start=1 ):
print("""-""" * 1_0_0 )
if "-squad" in model_shortcut_name or "-mrpc" in model_shortcut_name or "-mnli" in model_shortcut_name:
if not only_convert_finetuned_models:
print(F''' Skipping finetuned checkpoint {model_shortcut_name}''' )
continue
__snake_case : List[Any] = model_shortcut_name
elif only_convert_finetuned_models:
print(F''' Skipping not finetuned checkpoint {model_shortcut_name}''' )
continue
print(
F''' Converting checkpoint {i}/{len(__SCREAMING_SNAKE_CASE )}: {model_shortcut_name} - model_type {model_type}''' )
print("""-""" * 1_0_0 )
if config_shortcut_name in aws_config_map:
__snake_case : int = cached_file(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , force_download=not use_cached_models )
else:
__snake_case : Dict = config_shortcut_name
if model_shortcut_name in aws_model_maps:
__snake_case : Union[str, Any] = cached_file(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , force_download=not use_cached_models )
else:
__snake_case : List[Any] = model_shortcut_name
if os.path.isfile(__SCREAMING_SNAKE_CASE ):
__snake_case : List[str] = """converted_model"""
convert_pt_checkpoint_to_tf(
model_type=__SCREAMING_SNAKE_CASE , pytorch_checkpoint_path=__SCREAMING_SNAKE_CASE , config_file=__SCREAMING_SNAKE_CASE , tf_dump_path=os.path.join(__SCREAMING_SNAKE_CASE , model_shortcut_name + """-tf_model.h5""" ) , compare_with_pt_model=__SCREAMING_SNAKE_CASE , )
if remove_cached_files:
os.remove(__SCREAMING_SNAKE_CASE )
os.remove(__SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_dump_path", default=None, type=str, required=True, help="Path to the output Tensorflow dump file."
)
parser.add_argument(
"--model_type",
default=None,
type=str,
help=(
F'''Model type selected in the list of {list(MODEL_CLASSES.keys())}. If not given, will download and '''
"convert all the models from AWS."
),
)
parser.add_argument(
"--pytorch_checkpoint_path",
default=None,
type=str,
help=(
"Path to the PyTorch checkpoint path or shortcut name to download from AWS. "
"If not given, will download and convert all the checkpoints from AWS."
),
)
parser.add_argument(
"--config_file",
default=None,
type=str,
help=(
"The config json file corresponding to the pre-trained model. \n"
"This specifies the model architecture. If not given and "
"--pytorch_checkpoint_path is not given or is a shortcut name "
"use the configuration associated to the shortcut name on the AWS"
),
)
parser.add_argument(
"--compare_with_pt_model", action="store_true", help="Compare Tensorflow and PyTorch model predictions."
)
parser.add_argument(
"--use_cached_models",
action="store_true",
help="Use cached models if possible instead of updating to latest checkpoint versions.",
)
parser.add_argument(
"--remove_cached_files",
action="store_true",
help="Remove pytorch models after conversion (save memory when converting in batches).",
)
parser.add_argument("--only_convert_finetuned_models", action="store_true", help="Only convert finetuned models.")
    args = parser.parse_args()
# if args.pytorch_checkpoint_path is not None:
# convert_pt_checkpoint_to_tf(args.model_type.lower(),
# args.pytorch_checkpoint_path,
# args.config_file if args.config_file is not None else args.pytorch_checkpoint_path,
# args.tf_dump_path,
# compare_with_pt_model=args.compare_with_pt_model,
# use_cached_models=args.use_cached_models)
# else:
convert_all_pt_checkpoints_to_tf(
args.model_type.lower() if args.model_type is not None else None,
args.tf_dump_path,
model_shortcut_names_or_path=[args.pytorch_checkpoint_path]
if args.pytorch_checkpoint_path is not None
else None,
config_shortcut_names_or_path=[args.config_file] if args.config_file is not None else None,
compare_with_pt_model=args.compare_with_pt_model,
use_cached_models=args.use_cached_models,
remove_cached_files=args.remove_cached_files,
only_convert_finetuned_models=args.only_convert_finetuned_models,
)
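    # Example invocation (illustrative; assumes this script is saved as
    # convert_pytorch_checkpoint_to_tf2.py and that the shortcut name exists in MODEL_CLASSES):
    #   python convert_pytorch_checkpoint_to_tf2.py --model_type bert \
    #       --pytorch_checkpoint_path bert-base-uncased --tf_dump_path ./tf_models \
    #       --compare_with_pt_model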
| 20 | 0 |
import inspect
from typing import Optional, Union
import numpy as np
import PIL
import torch
from torch.nn import functional as F
from torchvision import transforms
from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
DPMSolverMultistepScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import (
PIL_INTERPOLATION,
randn_tensor,
)
def preprocess(image, w, h):
    """Convert a PIL image (or a list/tensor of images) into an NCHW float tensor in [-1, 1]."""
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]

    if isinstance(image[0], PIL.Image.Image):
        image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image]
        image = np.concatenate(image, axis=0)
        image = np.array(image).astype(np.float32) / 255.0
        image = image.transpose(0, 3, 1, 2)
        image = 2.0 * image - 1.0
        image = torch.from_numpy(image)
    elif isinstance(image[0], torch.Tensor):
        image = torch.cat(image, dim=0)
    return image
def slerp(t, v0, v1, DOT_THRESHOLD=0.9995):
    """Spherical linear interpolation between v0 and v1; falls back to lerp for nearly parallel inputs."""
    inputs_are_torch = False  # also handles plain numpy inputs, which the original did not initialise for
    if not isinstance(v0, np.ndarray):
        inputs_are_torch = True
        input_device = v0.device
        v0 = v0.cpu().numpy()
        v1 = v1.cpu().numpy()

    dot = np.sum(v0 * v1 / (np.linalg.norm(v0) * np.linalg.norm(v1)))
    if np.abs(dot) > DOT_THRESHOLD:
        v2 = (1 - t) * v0 + t * v1
    else:
        theta_0 = np.arccos(dot)
        sin_theta_0 = np.sin(theta_0)
        theta_t = theta_0 * t
        sin_theta_t = np.sin(theta_t)
        s0 = np.sin(theta_0 - theta_t) / sin_theta_0
        s1 = sin_theta_t / sin_theta_0
        v2 = s0 * v0 + s1 * v1

    if inputs_are_torch:
        v2 = torch.from_numpy(v2).to(input_device)

    return v2
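# Quick illustration of slerp (added for clarity, not part of the original pipeline):
#   slerp(0.5, torch.tensor([1.0, 0.0]), torch.tensor([0.0, 1.0]))
# returns roughly tensor([0.7071, 0.7071]), i.e. the midpoint on the unit circle.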
def spherical_dist_loss(x, y):
    """Spherical distance between x and y after unit normalization (half the squared great-circle angle)."""
    x = F.normalize(x, dim=-1)
    y = F.normalize(y, dim=-1)
    return (x - y).norm(dim=-1).div(2).arcsin().pow(2).mul(2)
def set_requires_grad(model, value):
    for param in model.parameters():
        param.requires_grad = value
class CLIPGuidedImagesMixingStableDiffusion(DiffusionPipeline):
    def __init__(self, vae: AutoencoderKL, text_encoder: CLIPTextModel, clip_model: CLIPModel, tokenizer: CLIPTokenizer, unet: UNet2DConditionModel, scheduler: Union[PNDMScheduler, LMSDiscreteScheduler, DDIMScheduler, DPMSolverMultistepScheduler], feature_extractor: CLIPFeatureExtractor, coca_model=None, coca_tokenizer=None, coca_transform=None):
        super().__init__()
        self.register_modules(vae=vae, text_encoder=text_encoder, clip_model=clip_model, tokenizer=tokenizer, unet=unet, scheduler=scheduler, feature_extractor=feature_extractor, coca_model=coca_model, coca_tokenizer=coca_tokenizer, coca_transform=coca_transform)
        self.feature_extractor_size = (
            feature_extractor.size
            if isinstance(feature_extractor.size, int)
            else feature_extractor.size["shortest_edge"]
        )
        self.normalize = transforms.Normalize(mean=feature_extractor.image_mean, std=feature_extractor.image_std)
        # these models are only used for encoding / guidance, so freeze them
        set_requires_grad(self.text_encoder, False)
        set_requires_grad(self.clip_model, False)
    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)

    def freeze_vae(self):
        set_requires_grad(self.vae, False)

    def unfreeze_vae(self):
        set_requires_grad(self.vae, True)

    def freeze_unet(self):
        set_requires_grad(self.unet, False)

    def unfreeze_unet(self):
        set_requires_grad(self.unet, True)
    def get_timesteps(self, num_inference_steps, strength, device):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)

        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]

        return timesteps, num_inference_steps - t_start
def snake_case__ ( self : Any , _lowerCAmelCase : str , _lowerCAmelCase : List[str] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Dict , _lowerCAmelCase : Any , _lowerCAmelCase : Optional[Any]=None ):
if not isinstance(_lowerCAmelCase , torch.Tensor ):
raise ValueError(f'''`image` has to be of type `torch.Tensor` but is {type(_lowerCAmelCase )}''' )
__snake_case : int = image.to(device=_lowerCAmelCase , dtype=_lowerCAmelCase )
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
__snake_case : Optional[Any] = [
self.vae.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(_lowerCAmelCase )
]
__snake_case : Dict = torch.cat(_lowerCAmelCase , dim=0 )
else:
__snake_case : int = self.vae.encode(_lowerCAmelCase ).latent_dist.sample(_lowerCAmelCase )
# Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
__snake_case : Union[str, Any] = 0.18215 * init_latents
__snake_case : Union[str, Any] = init_latents.repeat_interleave(_lowerCAmelCase , dim=0 )
__snake_case : Tuple = randn_tensor(init_latents.shape , generator=_lowerCAmelCase , device=_lowerCAmelCase , dtype=_lowerCAmelCase )
# get latents
__snake_case : Dict = self.scheduler.add_noise(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
__snake_case : int = init_latents
return latents
def snake_case__ ( self : Optional[int] , _lowerCAmelCase : Tuple ):
__snake_case : Optional[Any] = self.coca_transform(_lowerCAmelCase ).unsqueeze(0 )
with torch.no_grad(), torch.cuda.amp.autocast():
__snake_case : List[Any] = self.coca_model.generate(transformed_image.to(device=self.device , dtype=self.coca_model.dtype ) )
__snake_case : Optional[int] = self.coca_tokenizer.decode(generated[0].cpu().numpy() )
return generated.split("""<end_of_text>""" )[0].replace("""<start_of_text>""" , """""" ).rstrip(""" .,""" )
def snake_case__ ( self : Dict , _lowerCAmelCase : Tuple , _lowerCAmelCase : Union[str, Any] ):
__snake_case : List[Any] = self.feature_extractor.preprocess(_lowerCAmelCase )
__snake_case : str = torch.from_numpy(clip_image_input["""pixel_values"""][0] ).unsqueeze(0 ).to(self.device ).half()
__snake_case : Any = self.clip_model.get_image_features(_lowerCAmelCase )
__snake_case : Optional[Any] = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=_lowerCAmelCase )
__snake_case : Optional[int] = image_embeddings_clip.repeat_interleave(_lowerCAmelCase , dim=0 )
return image_embeddings_clip
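    # cond_fn below implements the CLIP-guidance step: it reconstructs a predicted x0 from the
    # current latents, decodes and embeds it with CLIP, measures the spherical distance to the
    # target image embedding, and nudges the noise prediction with the gradient of that loss.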
@torch.enable_grad()
def snake_case__ ( self : Dict , _lowerCAmelCase : List[Any] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : List[str] , _lowerCAmelCase : str , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : List[str] , ):
__snake_case : Tuple = latents.detach().requires_grad_()
__snake_case : Optional[int] = self.scheduler.scale_model_input(_lowerCAmelCase , _lowerCAmelCase )
# predict the noise residual
__snake_case : Optional[Any] = self.unet(_lowerCAmelCase , _lowerCAmelCase , encoder_hidden_states=_lowerCAmelCase ).sample
if isinstance(self.scheduler , (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler) ):
__snake_case : Optional[Any] = self.scheduler.alphas_cumprod[timestep]
__snake_case : Optional[Any] = 1 - alpha_prod_t
# compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
__snake_case : Dict = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5
__snake_case : Any = torch.sqrt(_lowerCAmelCase )
__snake_case : Union[str, Any] = pred_original_sample * (fac) + latents * (1 - fac)
elif isinstance(self.scheduler , _lowerCAmelCase ):
__snake_case : List[str] = self.scheduler.sigmas[index]
__snake_case : List[str] = latents - sigma * noise_pred
else:
raise ValueError(f'''scheduler type {type(self.scheduler )} not supported''' )
# Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
__snake_case : Any = 1 / 0.18215 * sample
__snake_case : str = self.vae.decode(_lowerCAmelCase ).sample
__snake_case : Union[str, Any] = (image / 2 + 0.5).clamp(0 , 1 )
__snake_case : List[str] = transforms.Resize(self.feature_extractor_size )(_lowerCAmelCase )
__snake_case : Any = self.normalize(_lowerCAmelCase ).to(latents.dtype )
__snake_case : str = self.clip_model.get_image_features(_lowerCAmelCase )
__snake_case : Optional[Any] = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=_lowerCAmelCase )
__snake_case : Optional[int] = spherical_dist_loss(_lowerCAmelCase , _lowerCAmelCase ).mean() * clip_guidance_scale
__snake_case : List[Any] = -torch.autograd.grad(_lowerCAmelCase , _lowerCAmelCase )[0]
if isinstance(self.scheduler , _lowerCAmelCase ):
__snake_case : Optional[Any] = latents.detach() + grads * (sigma**2)
__snake_case : Any = noise_pred_original
else:
__snake_case : int = noise_pred_original - torch.sqrt(_lowerCAmelCase ) * grads
return noise_pred, latents
@torch.no_grad()
def __call__( self : int , _lowerCAmelCase : Union[torch.FloatTensor, PIL.Image.Image] , _lowerCAmelCase : Union[torch.FloatTensor, PIL.Image.Image] , _lowerCAmelCase : Optional[str] = None , _lowerCAmelCase : Optional[str] = None , _lowerCAmelCase : Optional[int] = 5_12 , _lowerCAmelCase : Optional[int] = 5_12 , _lowerCAmelCase : float = 0.6 , _lowerCAmelCase : Optional[int] = 50 , _lowerCAmelCase : Optional[float] = 7.5 , _lowerCAmelCase : Optional[int] = 1 , _lowerCAmelCase : float = 0.0 , _lowerCAmelCase : Optional[float] = 1_00 , _lowerCAmelCase : Optional[torch.Generator] = None , _lowerCAmelCase : Optional[str] = "pil" , _lowerCAmelCase : bool = True , _lowerCAmelCase : float = 0.8 , _lowerCAmelCase : float = 0.1 , _lowerCAmelCase : float = 0.1 , ):
if isinstance(_lowerCAmelCase , _lowerCAmelCase ) and len(_lowerCAmelCase ) != batch_size:
raise ValueError(f'''You have passed {batch_size} batch_size, but only {len(_lowerCAmelCase )} generators.''' )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f'''`height` and `width` have to be divisible by 8 but are {height} and {width}.''' )
if isinstance(_lowerCAmelCase , torch.Generator ) and batch_size > 1:
__snake_case : List[str] = [generator] + [None] * (batch_size - 1)
__snake_case : str = [
("""model""", self.coca_model is None),
("""tokenizer""", self.coca_tokenizer is None),
("""transform""", self.coca_transform is None),
]
__snake_case : Tuple = [x[0] for x in coca_is_none if x[1]]
__snake_case : Optional[int] = """, """.join(_lowerCAmelCase )
# generate prompts with coca model if prompt is None
if content_prompt is None:
if len(_lowerCAmelCase ):
raise ValueError(
f'''Content prompt is None and CoCa [{coca_is_none_str}] is None.'''
f'''Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.''' )
__snake_case : Optional[int] = self.get_image_description(_lowerCAmelCase )
if style_prompt is None:
if len(_lowerCAmelCase ):
raise ValueError(
f'''Style prompt is None and CoCa [{coca_is_none_str}] is None.'''
f''' Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.''' )
__snake_case : Any = self.get_image_description(_lowerCAmelCase )
# get prompt text embeddings for content and style
__snake_case : List[Any] = self.tokenizer(
_lowerCAmelCase , padding="""max_length""" , max_length=self.tokenizer.model_max_length , truncation=_lowerCAmelCase , return_tensors="""pt""" , )
__snake_case : List[Any] = self.text_encoder(content_text_input.input_ids.to(self.device ) )[0]
__snake_case : Any = self.tokenizer(
_lowerCAmelCase , padding="""max_length""" , max_length=self.tokenizer.model_max_length , truncation=_lowerCAmelCase , return_tensors="""pt""" , )
__snake_case : Tuple = self.text_encoder(style_text_input.input_ids.to(self.device ) )[0]
__snake_case : str = slerp(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
# duplicate text embeddings for each generation per prompt
__snake_case : str = text_embeddings.repeat_interleave(_lowerCAmelCase , dim=0 )
# set timesteps
__snake_case : str = """offset""" in set(inspect.signature(self.scheduler.set_timesteps ).parameters.keys() )
__snake_case : Tuple = {}
if accepts_offset:
__snake_case : Optional[Any] = 1
self.scheduler.set_timesteps(_lowerCAmelCase , **_lowerCAmelCase )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
self.scheduler.timesteps.to(self.device )
__snake_case : Any = self.get_timesteps(_lowerCAmelCase , _lowerCAmelCase , self.device )
__snake_case : Union[str, Any] = timesteps[:1].repeat(_lowerCAmelCase )
# Preprocess image
__snake_case : List[str] = preprocess(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
__snake_case : str = self.prepare_latents(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , text_embeddings.dtype , self.device , _lowerCAmelCase )
__snake_case : Optional[Any] = preprocess(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
__snake_case : Union[str, Any] = self.prepare_latents(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , text_embeddings.dtype , self.device , _lowerCAmelCase )
__snake_case : int = slerp(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
if clip_guidance_scale > 0:
__snake_case : Tuple = self.get_clip_image_embeddings(_lowerCAmelCase , _lowerCAmelCase )
__snake_case : Tuple = self.get_clip_image_embeddings(_lowerCAmelCase , _lowerCAmelCase )
__snake_case : List[Any] = slerp(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
__snake_case : Optional[Any] = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
__snake_case : Optional[Any] = content_text_input.input_ids.shape[-1]
__snake_case : List[Any] = self.tokenizer([""""""] , padding="""max_length""" , max_length=_lowerCAmelCase , return_tensors="""pt""" )
__snake_case : str = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt
__snake_case : str = uncond_embeddings.repeat_interleave(_lowerCAmelCase , dim=0 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
__snake_case : Union[str, Any] = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
__snake_case : Any = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
__snake_case : Union[str, Any] = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not work reproducibly on mps
__snake_case : Any = torch.randn(_lowerCAmelCase , generator=_lowerCAmelCase , device="""cpu""" , dtype=_lowerCAmelCase ).to(
self.device )
else:
__snake_case : int = torch.randn(_lowerCAmelCase , generator=_lowerCAmelCase , device=self.device , dtype=_lowerCAmelCase )
else:
if latents.shape != latents_shape:
raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' )
__snake_case : Dict = latents.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
__snake_case : int = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
__snake_case : Dict = """eta""" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
__snake_case : List[Any] = {}
if accepts_eta:
__snake_case : int = eta
# check if the scheduler accepts generator
__snake_case : Optional[Any] = """generator""" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
if accepts_generator:
__snake_case : Tuple = generator
with self.progress_bar(total=_lowerCAmelCase ):
for i, t in enumerate(_lowerCAmelCase ):
# expand the latents if we are doing classifier free guidance
__snake_case : Any = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
__snake_case : int = self.scheduler.scale_model_input(_lowerCAmelCase , _lowerCAmelCase )
# predict the noise residual
__snake_case : Tuple = self.unet(_lowerCAmelCase , _lowerCAmelCase , encoder_hidden_states=_lowerCAmelCase ).sample
# perform classifier free guidance
if do_classifier_free_guidance:
__snake_case : Any = noise_pred.chunk(2 )
__snake_case : Dict = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# perform clip guidance
if clip_guidance_scale > 0:
__snake_case : Tuple = (
text_embeddings.chunk(2 )[1] if do_classifier_free_guidance else text_embeddings
)
__snake_case : Dict = self.cond_fn(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , )
# compute the previous noisy sample x_t -> x_t-1
__snake_case : Optional[int] = self.scheduler.step(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , **_lowerCAmelCase ).prev_sample
# Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
__snake_case : Dict = 1 / 0.18215 * latents
__snake_case : Optional[Any] = self.vae.decode(_lowerCAmelCase ).sample
__snake_case : int = (image / 2 + 0.5).clamp(0 , 1 )
__snake_case : Union[str, Any] = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
__snake_case : Optional[Any] = self.numpy_to_pil(_lowerCAmelCase )
if not return_dict:
return (image, None)
return StableDiffusionPipelineOutput(images=_lowerCAmelCase , nsfw_content_detected=_lowerCAmelCase )
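# Sketch of how this community pipeline is typically wired up (illustrative only; the model ids,
# dtype choices and component overrides below are assumptions, not part of this file):
#
#   pipe = CLIPGuidedImagesMixingStableDiffusion.from_pretrained(
#       "CompVis/stable-diffusion-v1-4",
#       clip_model=CLIPModel.from_pretrained("openai/clip-vit-base-patch32"),
#       feature_extractor=CLIPFeatureExtractor.from_pretrained("openai/clip-vit-base-patch32"),
#   ).to("cuda")
#   result = pipe(content_image, style_image, num_inference_steps=50, clip_guidance_scale=100)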
| 350 | import random
def _partition(data: list, pivot) -> tuple:
    """Three-way partition `data` into (less, equal, greater) lists relative to `pivot`."""
    less, equal, greater = [], [], []
    for element in data:
        if element < pivot:
            less.append(element)
        elif element > pivot:
            greater.append(element)
        else:
            equal.append(element)
    return less, equal, greater


def quick_select(items: list, index: int):
    """Return the element that would sit at position `index` if `items` were sorted."""
    # index = len(items) // 2 when trying to find the median
    #   (value of index when items is sorted)

    # invalid input
    if index >= len(items) or index < 0:
        return None

    pivot = items[random.randint(0, len(items) - 1)]
    count = 0
    smaller, equal, larger = _partition(items, pivot)
    count = len(equal)
    m = len(smaller)

    # index is the pivot
    if m <= index < m + count:
        return pivot
    # must be in smaller
    elif m > index:
        return quick_select(smaller, index)
    # must be in larger
    else:
        return quick_select(larger, index - (m + count))
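

# Illustrative quick check (added for demonstration; not part of the original module):
if __name__ == "__main__":
    # sorted order is [2, 4, 5, 7, 32, 54, 899], so index 5 holds 54
    print(quick_select([2, 4, 5, 7, 899, 54, 32], 5))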
| 20 | 0 |
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFAutoModel, is_tensorflow_text_available, is_tf_available
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_tensorflow_text_available():
from transformers.models.bert import TFBertTokenizer
TOKENIZER_CHECKPOINTS = ["bert-base-uncased", "bert-base-cased"]
TINY_MODEL_CHECKPOINT = "hf-internal-testing/tiny-bert-tf-only"
if is_tf_available():
    class ModelToSave(tf.keras.Model):
        def __init__(self, tokenizer):
            super().__init__()
            self.tokenizer = tokenizer
            config = AutoConfig.from_pretrained(TINY_MODEL_CHECKPOINT)
            self.bert = TFAutoModel.from_config(config)

        def call(self, inputs):
            tokenized = self.tokenizer(inputs)
            out = self.bert(**tokenized)
            return out["pooler_output"]
@require_tf
@require_tensorflow_text
class BertTokenizationTest(unittest.TestCase):
    # The TF tokenizers are usually going to be used as pretrained tokenizers from existing model
    # checkpoints, so that's what we focus on here.

    def setUp(self):
        super().setUp()

        self.tokenizers = [
            BertTokenizer.from_pretrained(checkpoint) for checkpoint in (TOKENIZER_CHECKPOINTS * 2)
        ]  # repeat for when fast_bert_tokenizer=false
        self.tf_tokenizers = [TFBertTokenizer.from_pretrained(checkpoint) for checkpoint in TOKENIZER_CHECKPOINTS] + [
            TFBertTokenizer.from_pretrained(checkpoint, use_fast_bert_tokenizer=False)
            for checkpoint in TOKENIZER_CHECKPOINTS
        ]
        assert len(self.tokenizers) == len(self.tf_tokenizers)

        self.test_sentences = [
            "This is a straightforward English test sentence.",
            "This one has some weird characters\rto\nsee\r\nif those\u00E9break things.",
            "Now we're going to add some Chinese: 一 二 三 一二三",
            "And some much more rare Chinese: 齉 堃 齉堃",
            "Je vais aussi écrire en français pour tester les accents",
            "Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ",
        ]
        self.paired_sentences = list(zip(self.test_sentences, self.test_sentences[::-1]))
    def test_output_equivalence(self):
        for tokenizer, tf_tokenizer in zip(self.tokenizers, self.tf_tokenizers):
            for test_inputs in (self.test_sentences, self.paired_sentences):
                python_outputs = tokenizer(test_inputs, return_tensors="tf", padding="longest")
                tf_outputs = tf_tokenizer(test_inputs)

                for key in python_outputs.keys():
                    self.assertTrue(tf.reduce_all(python_outputs[key].shape == tf_outputs[key].shape))
                    self.assertTrue(tf.reduce_all(tf.cast(python_outputs[key], tf.int64) == tf_outputs[key]))
    @slow
    def test_different_pairing_styles(self):
        for tf_tokenizer in self.tf_tokenizers:
            merged_outputs = tf_tokenizer(self.paired_sentences)
            separated_outputs = tf_tokenizer(
                text=[sentence[0] for sentence in self.paired_sentences],
                text_pair=[sentence[1] for sentence in self.paired_sentences],
            )
            for key in merged_outputs.keys():
                self.assertTrue(tf.reduce_all(tf.cast(merged_outputs[key], tf.int64) == separated_outputs[key]))
    @slow
    def test_graph_mode(self):
        for tf_tokenizer in self.tf_tokenizers:
            compiled_tokenizer = tf.function(tf_tokenizer)
            for test_inputs in (self.test_sentences, self.paired_sentences):
                test_inputs = tf.constant(test_inputs)
                compiled_outputs = compiled_tokenizer(test_inputs)
                eager_outputs = tf_tokenizer(test_inputs)

                for key in eager_outputs.keys():
                    self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key]))
    @slow
    def test_export_for_serving(self):
        for tf_tokenizer in self.tf_tokenizers:
            model = ModelToSave(tokenizer=tf_tokenizer)
            test_inputs = tf.convert_to_tensor(self.test_sentences)
            out = model(test_inputs)  # Build model with some sample inputs

            with TemporaryDirectory() as tempdir:
                save_path = Path(tempdir) / "saved.model"
                model.save(save_path)
                loaded_model = tf.keras.models.load_model(save_path)

            loaded_output = loaded_model(test_inputs)
            # We may see small differences because the loaded model is compiled, so we need an epsilon for the test
            self.assertLessEqual(tf.reduce_max(tf.abs(out - loaded_output)), 1e-5)
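# Note (illustrative, not part of the test suite): TFBertTokenizer runs tokenization inside the
# TensorFlow graph, so the exported SavedModel above can accept raw strings directly:
#   tf_tokenizer = TFBertTokenizer.from_pretrained("bert-base-uncased")
#   outputs = tf_tokenizer(tf.constant(["hello world"]))  # dict of int64 tensors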
| 351 | import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"junnyu/roformer_chinese_small": "https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt",
"junnyu/roformer_chinese_base": "https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt",
"junnyu/roformer_chinese_char_small": (
"https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt"
),
"junnyu/roformer_chinese_char_base": (
"https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt"
),
"junnyu/roformer_small_discriminator": (
"https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt"
),
"junnyu/roformer_small_generator": (
"https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt"
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"junnyu/roformer_chinese_small": 15_36,
"junnyu/roformer_chinese_base": 15_36,
"junnyu/roformer_chinese_char_small": 5_12,
"junnyu/roformer_chinese_char_base": 5_12,
"junnyu/roformer_small_discriminator": 1_28,
"junnyu/roformer_small_generator": 1_28,
}
PRETRAINED_INIT_CONFIGURATION = {
"junnyu/roformer_chinese_small": {"do_lower_case": True},
"junnyu/roformer_chinese_base": {"do_lower_case": True},
"junnyu/roformer_chinese_char_small": {"do_lower_case": True},
"junnyu/roformer_chinese_char_base": {"do_lower_case": True},
"junnyu/roformer_small_discriminator": {"do_lower_case": True},
"junnyu/roformer_small_generator": {"do_lower_case": True},
}
class RoFormerTokenizerFast(PreTrainedTokenizerFast):
    """Constructs a "fast" RoFormer tokenizer backed by the HuggingFace `tokenizers` library."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RoFormerTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        pre_tok_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            pre_tok_state.get("lowercase", do_lower_case) != do_lower_case
            or pre_tok_state.get("strip_accents", strip_accents) != strip_accents
        ):
            pre_tok_class = getattr(normalizers, pre_tok_state.pop("type"))
            pre_tok_state["lowercase"] = do_lower_case
            pre_tok_state["strip_accents"] = strip_accents
            self.backend_tokenizer.normalizer = pre_tok_class(**pre_tok_state)

        self.do_lower_case = do_lower_case

    def __getstate__(self):
        state = self.__dict__.copy()
        # the custom Jieba pre-tokenizer cannot be pickled, so swap in a plain one
        state["_tokenizer"].pre_tokenizer = BertPreTokenizer()
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        vocab = self.__dict__["_tokenizer"].get_vocab()
        self.__dict__["_tokenizer"].pre_tokenizer = PreTokenizer.custom(JiebaPreTokenizer(vocab))

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1 is not None:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def save_pretrained(self, save_directory, legacy_format=None, filename_prefix=None, push_to_hub=False, **kwargs):
        self.backend_tokenizer.pre_tokenizer = BertPreTokenizer()
        return super().save_pretrained(save_directory, legacy_format, filename_prefix, push_to_hub, **kwargs)
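# Illustrative usage (not part of this module): the custom Jieba pre-tokenizer is not
# serializable by the `tokenizers` library, which is why save_pretrained above swaps in a
# plain BertPreTokenizer first.
#   tokenizer = RoFormerTokenizerFast.from_pretrained("junnyu/roformer_chinese_base")
#   tokenizer.tokenize("今天天气非常好。")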
| 20 | 0 |
import io
import os
import unicodedata
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = "▁"
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "sentencepiece_model_ckpt": "sentencepiece.bpe.model"}
RESOURCE_FILES_NAMES = {
"sentencepiece_model_file": "sentencepiece.bpe.model",
"vocab_file": "vocab.txt",
}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt",
"ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt",
},
"sentencepiece_model_file": {
"ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model",
"ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"ernie-m-base": 5_14,
"ernie-m-large": 5_14,
}
PRETRAINED_INIT_CONFIGURATION = {
"ernie-m-base": {"do_lower_case": False},
"ernie-m-large": {"do_lower_case": False},
}
class ErnieMTokenizer(PreTrainedTokenizer):
    """Constructs an Ernie-M tokenizer backed by sentencepiece."""

    model_input_names: List[str] = ["input_ids"]
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    resource_files_names = RESOURCE_FILES_NAMES

    def __init__(self, sentencepiece_model_ckpt, vocab_file=None, do_lower_case=False, encoding="utf8", unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs):
        # Mask token behave like a normal word, i.e. include the space before it and
        # is included in the raw text, there should be a match in a non-normalized sentence.
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            vocab_file=vocab_file,
            encoding=encoding,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.do_lower_case = do_lower_case
        self.sentencepiece_model_ckpt = sentencepiece_model_ckpt
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(sentencepiece_model_ckpt)

        # to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning
        if vocab_file is not None:
            self.vocab = self.load_vocab(filepath=vocab_file)
        else:
            self.vocab = {self.sp_model.id_to_piece(id): id for id in range(self.sp_model.get_piece_size())}
        self.reverse_vocab = {v: k for k, v in self.vocab.items()}
    def get_offset_mapping(self, text):
        if text is None:
            return None

        split_tokens = self.tokenize(text)
        normalized_text, char_mapping = "", []

        for i, ch in enumerate(text):
            if ch in self.SP_CHAR_MAPPING:
                ch = self.SP_CHAR_MAPPING.get(ch)
            else:
                ch = unicodedata.normalize("NFKC", ch)
            if self.is_whitespace(ch):
                continue
            normalized_text += ch
            char_mapping.extend([i] * len(ch))

        text, token_mapping, offset = normalized_text, [], 0

        if self.do_lower_case:
            text = text.lower()

        for token in split_tokens:
            if token[:1] == "▁":
                token = token[1:]
            start = text[offset:].index(token) + offset
            end = start + len(token)

            token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1))
            offset = end
        return token_mapping
    @property
    def vocab_size(self):
        return len(self.vocab)

    def get_vocab(self):
        return dict(self.vocab, **self.added_tokens_encoder)

    def __getstate__(self):
        state = self.__dict__.copy()
        # the sentencepiece processor is not picklable; it is reloaded in __setstate__
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.sentencepiece_model_ckpt)

    def clean_text(self, text):
        """Performs invalid character removal and whitespace cleanup on text."""
        return "".join((self.SP_CHAR_MAPPING.get(c, c) for c in text))
    def _tokenize(self, text, enable_sampling=False, nbest_size=64, alpha=0.1):
        """Tokenize a string, optionally using sentencepiece subword regularization (sampling)."""
        if self.sp_model_kwargs.get("enable_sampling") is True:
            enable_sampling = True
        if self.sp_model_kwargs.get("alpha") is not None:
            alpha = self.sp_model_kwargs.get("alpha")
        if self.sp_model_kwargs.get("nbest_size") is not None:
            nbest_size = self.sp_model_kwargs.get("nbest_size")

        if not enable_sampling:
            pieces = self.sp_model.EncodeAsPieces(text)
        else:
            pieces = self.sp_model.SampleEncodeAsPieces(text, nbest_size, alpha)
        new_pieces = []
        for pi, piece in enumerate(pieces):
            if piece == SPIECE_UNDERLINE:
                if not pieces[pi + 1].startswith(SPIECE_UNDERLINE) and pi != 0:
                    new_pieces.append(SPIECE_UNDERLINE)
                    continue
                else:
                    continue
            lst_i = 0
            for i, chunk in enumerate(piece):
                if chunk == SPIECE_UNDERLINE:
                    continue
                if self.is_ch_char(chunk) or self.is_punct(chunk):
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i])
                    new_pieces.append(chunk)
                    lst_i = i + 1
                elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit():
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i])
                    lst_i = i
                elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit():
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i])
                    lst_i = i
            if len(piece) > lst_i:
                new_pieces.append(piece[lst_i:])
        return new_pieces

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def convert_ids_to_string(self, ids):
        tokens = self.convert_ids_to_tokens(ids)
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string
    def _convert_token_to_id(self, token):
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.reverse_vocab.get(index, self.unk_token)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        _cls = [self.cls_token_id]
        _sep = [self.sep_token_id]
        return _cls + token_ids_0 + _sep + _sep + token_ids_1 + _sep

    def build_offset_mapping_with_special_tokens(self, offset_mapping_0, offset_mapping_1=None):
        if offset_mapping_1 is None:
            return [(0, 0)] + offset_mapping_0 + [(0, 0)]

        return [(0, 0)] + offset_mapping_0 + [(0, 0), (0, 0)] + offset_mapping_1 + [(0, 0)]

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        # called when `add_special_tokens` is True, so align with `build_inputs_with_special_tokens` method
        if token_ids_1 is None:
            # [CLS] X [SEP]
            return (len(token_ids_0) + 2) * [0]

        # [CLS] A [SEP] [SEP] B [SEP]
        return [0] * (len(token_ids_0) + 1) + [1] * (len(token_ids_1) + 3)
    def is_ch_char(self, char):
        """Whether the character is a CJK unified ideograph."""
        if "\u4e00" <= char <= "\u9fff":
            return True
        return False

    def is_alpha(self, char):
        """Whether the character is an ASCII letter."""
        if ("a" <= char <= "z") or ("A" <= char <= "Z"):
            return True
        return False

    def is_punct(self, char):
        """Whether the character is punctuation handled specially by Ernie-M."""
        if char in ",;:.?!~,;:。?!《》【】":
            return True
        return False

    def is_whitespace(self, char):
        """Whether the character is whitespace (including Unicode space separators)."""
        if char == " " or char == "\t" or char == "\n" or char == "\r":
            return True
        if len(char) == 1:
            cat = unicodedata.category(char)
            if cat == "Zs":
                return True
        return False

    def load_vocab(self, filepath):
        token_to_idx = {}
        with io.open(filepath, "r", encoding="utf-8") as f:
            for index, line in enumerate(f):
                token = line.rstrip("\n")
                token_to_idx[token] = int(index)
        return token_to_idx
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        index = 0
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
            )
        else:
            vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!"
                    )
                    index = token_index
                writer.write(token + "\n")
                index += 1
        tokenizer_model_file = os.path.join(save_directory, "sentencepiece.bpe.model")
        with open(tokenizer_model_file, "wb") as fi:
            content_spiece_model = self.sp_model.serialized_model_proto()
            fi.write(content_spiece_model)
        return (vocab_file,)
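# Illustrative usage (added for clarity; the checkpoint name is taken from the URL map above):
#   tokenizer = ErnieMTokenizer.from_pretrained("susnato/ernie-m-base_pytorch")
#   tokenizer("He was a puppeteer")["input_ids"]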
| 352 | from __future__ import annotations
import math
def minimax(depth: int, node_index: int, is_max: bool, scores: list[int], height: float) -> int:
    """Return the optimal score for the current player in a full binary game tree."""
    if depth < 0:
        raise ValueError("Depth cannot be less than 0")
    if len(scores) == 0:
        raise ValueError("Scores cannot be empty")
    if depth == height:
        return scores[node_index]
    if is_max:
        return max(
            minimax(depth + 1, node_index * 2, False, scores, height),
            minimax(depth + 1, node_index * 2 + 1, False, scores, height),
        )
    return min(
        minimax(depth + 1, node_index * 2, True, scores, height),
        minimax(depth + 1, node_index * 2 + 1, True, scores, height),
    )


def main() -> None:
    scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    height = math.log(len(scores), 2)
    print("Optimal value : ", end="")
    print(minimax(0, 0, True, scores, height))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
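# Worked example (follows from the code above): with 8 leaves the tree height is log2(8) = 3;
# the depth-2 maxima are (90, 33, 65, 34423), the depth-1 minima are (33, 65), so the root
# maximizer returns 65.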
| 20 | 0 |
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''', F'''encoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(F'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', F'''encoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.weight''', F'''encoder.layers.{i}.fc1.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.bias''', F'''encoder.layers.{i}.fc1.bias'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.weight''', F'''encoder.layers.{i}.fc2.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.bias''', F'''encoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(F'''transformer.encoder.layers.{i}.norm1.weight''', F'''encoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((F'''transformer.encoder.layers.{i}.norm1.bias''', F'''encoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.weight''', F'''encoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.bias''', F'''encoder.layers.{i}.final_layer_norm.bias'''))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(F'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', F'''decoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', F'''decoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.cross_attn.out_proj.weight''',
F'''decoder.layers.{i}.encoder_attn.out_proj.weight''',
)
)
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.cross_attn.out_proj.bias''',
F'''decoder.layers.{i}.encoder_attn.out_proj.bias''',
)
)
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.weight''', F'''decoder.layers.{i}.fc1.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.bias''', F'''decoder.layers.{i}.fc1.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.weight''', F'''decoder.layers.{i}.fc2.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.bias''', F'''decoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm1.weight''', F'''decoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.norm1.bias''', F'''decoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm2.weight''', F'''decoder.layers.{i}.encoder_attn_layer_norm.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm2.bias''', F'''decoder.layers.{i}.encoder_attn_layer_norm.bias''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.weight''', F'''decoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.bias''', F'''decoder.layers.{i}.final_layer_norm.bias'''))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_qcontent_proj.weight''', F'''decoder.layers.{i}.sa_qcontent_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_kcontent_proj.weight''', F'''decoder.layers.{i}.sa_kcontent_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_qpos_proj.weight''', F'''decoder.layers.{i}.sa_qpos_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_kpos_proj.weight''', F'''decoder.layers.{i}.sa_kpos_proj.weight''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.sa_v_proj.weight''', F'''decoder.layers.{i}.sa_v_proj.weight'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_qcontent_proj.weight''', F'''decoder.layers.{i}.ca_qcontent_proj.weight''')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_kcontent_proj.weight''', F'''decoder.layers.{i}.ca_kcontent_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_kpos_proj.weight''', F'''decoder.layers.{i}.ca_kpos_proj.weight''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.ca_v_proj.weight''', F'''decoder.layers.{i}.ca_v_proj.weight'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight''', F'''decoder.layers.{i}.ca_qpos_sine_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_qcontent_proj.bias''', F'''decoder.layers.{i}.sa_qcontent_proj.bias''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_kcontent_proj.bias''', F'''decoder.layers.{i}.sa_kcontent_proj.bias''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.sa_qpos_proj.bias''', F'''decoder.layers.{i}.sa_qpos_proj.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.sa_kpos_proj.bias''', F'''decoder.layers.{i}.sa_kpos_proj.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.sa_v_proj.bias''', F'''decoder.layers.{i}.sa_v_proj.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_qcontent_proj.bias''', F'''decoder.layers.{i}.ca_qcontent_proj.bias''')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_kcontent_proj.bias''', F'''decoder.layers.{i}.ca_kcontent_proj.bias''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.ca_kpos_proj.bias''', F'''decoder.layers.{i}.ca_kpos_proj.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.ca_v_proj.bias''', F'''decoder.layers.{i}.ca_v_proj.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias''', F'''decoder.layers.{i}.ca_qpos_sine_proj.bias''')
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
("input_proj.weight", "input_projection.weight"),
("input_proj.bias", "input_projection.bias"),
("query_embed.weight", "query_position_embeddings.weight"),
("transformer.decoder.norm.weight", "decoder.layernorm.weight"),
("transformer.decoder.norm.bias", "decoder.layernorm.bias"),
("class_embed.weight", "class_labels_classifier.weight"),
("class_embed.bias", "class_labels_classifier.bias"),
("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"),
("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"),
("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"),
("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"),
("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"),
("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"),
("transformer.decoder.ref_point_head.layers.0.weight", "decoder.ref_point_head.layers.0.weight"),
("transformer.decoder.ref_point_head.layers.0.bias", "decoder.ref_point_head.layers.0.bias"),
("transformer.decoder.ref_point_head.layers.1.weight", "decoder.ref_point_head.layers.1.weight"),
("transformer.decoder.ref_point_head.layers.1.bias", "decoder.ref_point_head.layers.1.bias"),
("transformer.decoder.query_scale.layers.0.weight", "decoder.query_scale.layers.0.weight"),
("transformer.decoder.query_scale.layers.0.bias", "decoder.query_scale.layers.0.bias"),
("transformer.decoder.query_scale.layers.1.weight", "decoder.query_scale.layers.1.weight"),
("transformer.decoder.query_scale.layers.1.bias", "decoder.query_scale.layers.1.bias"),
("transformer.decoder.layers.0.ca_qpos_proj.weight", "decoder.layers.0.ca_qpos_proj.weight"),
("transformer.decoder.layers.0.ca_qpos_proj.bias", "decoder.layers.0.ca_qpos_proj.bias"),
]
)
def rename_key(state_dict, old, new):
    val = state_dict.pop(old)
    state_dict[new] = val
def rename_backbone_keys(state_dict):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value

    return new_state_dict
def read_in_q_k_v(state_dict, is_panoptic=False):
    prefix = ""
    if is_panoptic:
        prefix = "conditional_detr."

    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
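# Note: the Conditional DETR transformer uses a hidden size of 256, so the fused in_proj_weight
# above has shape (3 * 256, 256); the slices peel off q, k and v in that order.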
def prepare_img():
    # image of two cats, used across HF conversion scripts for quick sanity checks
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_conditional_detr_checkpoint(model_name, pytorch_dump_folder_path):
    """Copy/paste/tweak the original Conditional DETR weights into the HuggingFace structure."""
    # load default config
    config = ConditionalDetrConfig()
    # set backbone and dilation attributes
    if "resnet101" in model_name:
        config.backbone = "resnet101"
    if "dc5" in model_name:
        config.dilation = True
    is_panoptic = "panoptic" in model_name
    if is_panoptic:
        config.num_labels = 250
    else:
        config.num_labels = 91
    repo_id = "huggingface/label-files"
    filename = "coco-detection-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    # load image processor
    format = "coco_panoptic" if is_panoptic else "coco_detection"
    image_processor = ConditionalDetrImageProcessor(format=format)

    # prepare image
    img = prepare_img()
    encoding = image_processor(images=img, return_tensors="pt")
    pixel_values = encoding["pixel_values"]

    logger.info(f"Converting model {model_name}...")

    # load original model from torch hub
    conditional_detr = torch.hub.load("DeppMeng/ConditionalDETR", model_name, pretrained=True).eval()
    state_dict = conditional_detr.state_dict()
    # rename keys
    for src, dest in rename_keys:
        if is_panoptic:
            src = "conditional_detr." + src
        rename_key(state_dict, src, dest)
    state_dict = rename_backbone_keys(state_dict)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict, is_panoptic=is_panoptic)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "conditional_detr.model." if is_panoptic else "model."
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith("conditional_detr")
                and not key.startswith("class_labels_classifier")
                and not key.startswith("bbox_predictor")
            ):
                val = state_dict.pop(key)
                # target key names follow the upstream DETR conversion convention
                state_dict["conditional_detr.model" + key[len("conditional_detr") :]] = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                val = state_dict.pop(key)
                state_dict["conditional_detr." + key] = val
            elif key.startswith("bbox_attention") or key.startswith("mask_head"):
                continue
            else:
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
        else:
            if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
    # finally, create HuggingFace model and load state dict
    model = ConditionalDetrForSegmentation(config) if is_panoptic else ConditionalDetrForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    model.push_to_hub(repo_id=model_name, organization="DepuMeng", commit_message="Add model")
    # verify our conversion
    original_outputs = conditional_detr(pixel_values)
    outputs = model(pixel_values)
    assert torch.allclose(outputs.logits, original_outputs["pred_logits"], atol=1e-4)
    assert torch.allclose(outputs.pred_boxes, original_outputs["pred_boxes"], atol=1e-4)
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks, original_outputs["pred_masks"], atol=1e-4)

    # Save model and image processor
    logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--model_name",
        default="conditional_detr_resnet50",
        type=str,
        help="Name of the CONDITIONAL_DETR model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
    )
    args = parser.parse_args()
    convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
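
# Example invocation (illustrative; the script's file name is an assumption):
#
#   python convert_conditional_detr_original_pytorch_checkpoint_to_pytorch.py \
#       --model_name conditional_detr_resnet50 \
#       --pytorch_dump_folder_path ./conditional_detr_resnet50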
| 353 | import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
import PIL.Image
from .features import FeatureType
_IMAGE_COMPRESSION_FORMATS: Optional[List[str]] = None
_NATIVE_BYTEORDER = "<" if sys.byteorder == "little" else ">"
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image
_VALID_IMAGE_ARRAY_DTYPES = [
np.dtype("|b1"),
np.dtype("|u1"),
np.dtype("<u2"),
np.dtype(">u2"),
np.dtype("<i2"),
np.dtype(">i2"),
np.dtype("<u4"),
np.dtype(">u4"),
np.dtype("<i4"),
np.dtype(">i4"),
np.dtype("<f4"),
np.dtype(">f4"),
np.dtype("<f8"),
np.dtype(">f8"),
]
@dataclass
class Image:
    """Image feature to read image data from an image file."""

    decode: bool = True
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "PIL.Image.Image"
    pa_type: ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()})
    _type: str = field(default="Image", init=False, repr=False)

    def __call__(self):
        return self.pa_type
    def encode_example(self, value: Union[str, bytes, dict, np.ndarray, "PIL.Image.Image"]) -> dict:
        """Encode a single example into a format suitable for Arrow storage."""
        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError("To support encoding images, please install 'Pillow'.")

        if isinstance(value, list):
            value = np.array(value)

        if isinstance(value, str):
            return {"path": value, "bytes": None}
        elif isinstance(value, bytes):
            return {"path": None, "bytes": value}
        elif isinstance(value, np.ndarray):
            # convert the image array to PNG/TIFF bytes
            return encode_np_array(value)
        elif isinstance(value, PIL.Image.Image):
            # convert the PIL image to bytes (default format is PNG/TIFF)
            return encode_pil_image(value)
        elif value.get("path") is not None and os.path.isfile(value["path"]):
            # we set "bytes": None to not duplicate the data if they're already available locally
            return {"bytes": None, "path": value.get("path")}
        elif value.get("bytes") is not None or value.get("path") is not None:
            # store the image bytes, and path is used to infer the image format using the file extension
            return {"bytes": value.get("bytes"), "path": value.get("path")}
        else:
            raise ValueError(
                f"An image sample should have one of 'path' or 'bytes' but they are missing or None in {value}."
            )
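
    # Hypothetical usage sketch (added for illustration; not part of the original class):
    #
    #   Image().encode_example("path/to/cat.png")
    #   # -> {"path": "path/to/cat.png", "bytes": None}
    #   Image().encode_example(np.zeros((32, 32, 3), dtype=np.uint8))
    #   # -> {"path": None, "bytes": b"\x89PNG..."}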
    def decode_example(self, value: dict, token_per_repo_id=None) -> "PIL.Image.Image":
        """Decode an example into a PIL image, reading from a local path, the Hub, or raw bytes."""
        if not self.decode:
            raise RuntimeError("Decoding is disabled for this feature. Please use Image(decode=True) instead.")

        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError("To support decoding images, please install 'Pillow'.")

        if token_per_repo_id is None:
            token_per_repo_id = {}

        path, bytes_ = value["path"], value["bytes"]
        if bytes_ is None:
            if path is None:
                raise ValueError(f"An image should have one of 'path' or 'bytes' but both are None in {value}.")
            else:
                if is_local_path(path):
                    image = PIL.Image.open(path)
                else:
                    source_url = path.split("::")[-1]
                    try:
                        repo_id = string_to_dict(source_url, config.HUB_DATASETS_URL)["repo_id"]
                        use_auth_token = token_per_repo_id.get(repo_id)
                    except ValueError:
                        use_auth_token = None
                    with xopen(path, "rb", use_auth_token=use_auth_token) as f:
                        bytes_ = BytesIO(f.read())
                    image = PIL.Image.open(bytes_)
        else:
            image = PIL.Image.open(BytesIO(bytes_))
        image.load()  # to avoid "Too many open files" errors
        return image
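
    # Hypothetical usage sketch (added for illustration): decoding a sample that
    # only carries raw bytes.
    #
    #   pil_image = Image().decode_example({"path": None, "bytes": png_bytes})
    #   pil_image.size  # -> (width, height)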
    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        """If in the decodable state, return the feature itself, otherwise flatten it into a dictionary."""
        from .features import Value

        return (
            self
            if self.decode
            else {
                "bytes": Value("binary"),
                "path": Value("string"),
            }
        )
    def cast_storage(self, storage: Union[pa.StringArray, pa.StructArray, pa.ListArray]) -> pa.StructArray:
        """Cast an Arrow array to the Image storage type, i.e. struct<bytes: binary, path: string>."""
        if pa.types.is_string(storage.type):
            bytes_array = pa.array([None] * len(storage), type=pa.binary())
            storage = pa.StructArray.from_arrays([bytes_array, storage], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_binary(storage.type):
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([storage, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_struct(storage.type):
            if storage.type.get_field_index("bytes") >= 0:
                bytes_array = storage.field("bytes")
            else:
                bytes_array = pa.array([None] * len(storage), type=pa.binary())
            if storage.type.get_field_index("path") >= 0:
                path_array = storage.field("path")
            else:
                path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_list(storage.type):
            bytes_array = pa.array(
                [encode_np_array(np.array(arr))["bytes"] if arr is not None else None for arr in storage.to_pylist()],
                type=pa.binary(),
            )
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays(
                [bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null()
            )
        return array_cast(storage, self.pa_type)
    def embed_storage(self, storage: pa.StructArray) -> pa.StructArray:
        """Embed image files into the Arrow array, replacing path references with the file contents."""

        @no_op_if_value_is_null
        def path_to_bytes(path):
            with xopen(path, "rb") as f:
                bytes_ = f.read()
            return bytes_

        bytes_array = pa.array(
            [
                (path_to_bytes(x["path"]) if x["bytes"] is None else x["bytes"]) if x is not None else None
                for x in storage.to_pylist()
            ],
            type=pa.binary(),
        )
        path_array = pa.array(
            [os.path.basename(path) if path is not None else None for path in storage.field("path").to_pylist()],
            type=pa.string(),
        )
        storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null())
        return array_cast(storage, self.pa_type)


def list_image_compression_formats() -> List[str]:
    """List the image formats Pillow can both open and save, caching the result."""
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")

    global _IMAGE_COMPRESSION_FORMATS
    if _IMAGE_COMPRESSION_FORMATS is None:
        PIL.Image.init()
        _IMAGE_COMPRESSION_FORMATS = list(set(PIL.Image.OPEN.keys()) & set(PIL.Image.SAVE.keys()))
    return _IMAGE_COMPRESSION_FORMATS
def __lowerCAmelCase ( __SCREAMING_SNAKE_CASE : "PIL.Image.Image" ):
'''simple docstring'''
__snake_case : List[Any] = BytesIO()
if image.format in list_image_compression_formats():
__snake_case : Union[str, Any] = image.format
else:
__snake_case : List[Any] = """PNG""" if image.mode in ["""1""", """L""", """LA""", """RGB""", """RGBA"""] else """TIFF"""
image.save(__SCREAMING_SNAKE_CASE , format=__SCREAMING_SNAKE_CASE )
return buffer.getvalue()
def __lowerCAmelCase ( __SCREAMING_SNAKE_CASE : "PIL.Image.Image" ):
'''simple docstring'''
if hasattr(__SCREAMING_SNAKE_CASE , """filename""" ) and image.filename != "":
return {"path": image.filename, "bytes": None}
else:
return {"path": None, "bytes": image_to_bytes(__SCREAMING_SNAKE_CASE )}


def encode_np_array(array: np.ndarray) -> dict:
    """Encode a NumPy array as image bytes, downcasting its dtype to one Pillow supports if needed."""
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")

    dtype = array.dtype
    dtype_byteorder = dtype.byteorder if dtype.byteorder != "=" else _NATIVE_BYTEORDER
    dtype_kind = dtype.kind
    dtype_itemsize = dtype.itemsize

    dest_dtype = None

    # Multi-channel array case (only np.dtype("|u1") is allowed)
    if array.shape[2:]:
        dest_dtype = np.dtype("|u1")
        if dtype_kind not in ["u", "i"]:
            raise TypeError(
                f"Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays."
            )
        if dtype is not dest_dtype:
            warnings.warn(f"Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'")
    # Exact match
    elif dtype in _VALID_IMAGE_ARRAY_DTYPES:
        dest_dtype = dtype
    else:  # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
        while dtype_itemsize >= 1:
            dest_dtype_str = dtype_byteorder + dtype_kind + str(dtype_itemsize)
            if np.dtype(dest_dtype_str) in _VALID_IMAGE_ARRAY_DTYPES:
                dest_dtype = np.dtype(dest_dtype_str)
                warnings.warn(f"Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'")
                break
            else:
                dtype_itemsize //= 2
        if dest_dtype is None:
            raise TypeError(
                f"Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTYPES}"
            )

    image = PIL.Image.fromarray(array.astype(dest_dtype))
    return {"path": None, "bytes": image_to_bytes(image)}


def objects_to_list_of_image_dicts(
    objs: Union[List[str], List[dict], List[np.ndarray], List["PIL.Image.Image"]],
) -> List[dict]:
    """Encode a list of objects into a format suitable for Arrow storage."""
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")

    if objs:
        _, obj = first_non_null_value(objs)
        if isinstance(obj, str):
            return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
        if isinstance(obj, np.ndarray):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_np_array)
            return [obj_to_image_dict_func(obj) for obj in objs]
        elif isinstance(obj, PIL.Image.Image):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_pil_image)
            return [obj_to_image_dict_func(obj) for obj in objs]
        else:
            return objs
    else:
        return objs
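

# Hypothetical end-to-end sketch (added for illustration): how this feature is
# typically used through the public `datasets` API rather than called directly.
#
#   from datasets import Dataset, Features, Image
#   ds = Dataset.from_dict({"image": ["path/to/cat.png"]}, features=Features({"image": Image()}))
#   ds[0]["image"]  # -> decoded PIL.Image.Image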
| 20 | 0 |
from __future__ import annotations
def longest_subsequence(array: list[int]) -> list[int]:  # This function is recursive
    """Return the longest non-decreasing subsequence of `array`."""
    array_length = len(array)
    # If the array contains only one element, we return it (it's the stop condition of
    # recursion)
    if array_length <= 1:
        return array
    # Else
    pivot = array[0]
    is_found = False
    i = 1
    longest_subseq: list[int] = []
    while not is_found and i < array_length:
        if array[i] < pivot:
            is_found = True
            temp_array = [element for element in array[i:] if element >= array[i]]
            temp_array = longest_subsequence(temp_array)
            if len(temp_array) > len(longest_subseq):
                longest_subseq = temp_array
        else:
            i += 1

    temp_array = [element for element in array[1:] if element >= pivot]
    temp_array = [pivot, *longest_subsequence(temp_array)]
    if len(temp_array) > len(longest_subseq):
        return temp_array
    else:
        return longest_subseq
if __name__ == "__main__":
import doctest
doctest.testmod()
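
    # Example added for illustration (verified by hand against the algorithm above):
    print(longest_subsequence([10, 22, 9, 33, 21, 50, 41, 60, 80]))
    # -> [10, 22, 33, 41, 60, 80]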
| 354 | from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ImageToTextPipeline(Pipeline):
    """
    Image To Text pipeline using a `AutoModelForVision2Seq`. This pipeline predicts a caption for a given image.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == "tf" else MODEL_FOR_VISION_2_SEQ_MAPPING
        )
    def _sanitize_parameters(self, max_new_tokens=None, generate_kwargs=None, prompt=None):
        preprocess_params = {}
        forward_kwargs = {}

        if prompt is not None:
            preprocess_params["prompt"] = prompt
        if generate_kwargs is not None:
            forward_kwargs["generate_kwargs"] = generate_kwargs
        if max_new_tokens is not None:
            if "generate_kwargs" not in forward_kwargs:
                forward_kwargs["generate_kwargs"] = {}
            if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
                raise ValueError(
                    "'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter,"
                    " please use only one"
                )
            forward_kwargs["generate_kwargs"]["max_new_tokens"] = max_new_tokens
        return preprocess_params, forward_kwargs, {}
    def __call__(self, images: Union[str, List[str], "Image.Image", List["Image.Image"]], **kwargs):
        return super().__call__(images, **kwargs)
    def preprocess(self, image, prompt=None):
        image = load_image(image)

        if prompt is not None:
            if not isinstance(prompt, str):
                raise ValueError(
                    f"Received an invalid text input, got - {type(prompt)} - but expected a single string. "
                    "Note also that one single text can be provided for conditional image to text generation."
                )

            model_type = self.model.config.model_type

            if model_type == "git":
                model_inputs = self.image_processor(images=image, return_tensors=self.framework)
                input_ids = self.tokenizer(text=prompt, add_special_tokens=False).input_ids
                input_ids = [self.tokenizer.cls_token_id] + input_ids
                input_ids = torch.tensor(input_ids).unsqueeze(0)
                model_inputs.update({"input_ids": input_ids})
            elif model_type == "pix2struct":
                model_inputs = self.image_processor(images=image, header_text=prompt, return_tensors=self.framework)
            elif model_type != "vision-encoder-decoder":
                # vision-encoder-decoder does not support conditional generation
                model_inputs = self.image_processor(images=image, return_tensors=self.framework)
                text_inputs = self.tokenizer(prompt, return_tensors=self.framework)
                model_inputs.update(text_inputs)
            else:
                raise ValueError(f"Model type {model_type} does not support conditional text generation")
        else:
            model_inputs = self.image_processor(images=image, return_tensors=self.framework)

        if self.model.config.model_type == "git" and prompt is None:
            model_inputs["input_ids"] = None

        return model_inputs
    def _forward(self, model_inputs, generate_kwargs=None):
        # Git model sets `model_inputs["input_ids"] = None` in `preprocess` (when `prompt=None`). In batch model, the
        # pipeline will group them into a list of `None`, which fail `_forward`. Avoid this by checking it first.
        if (
            "input_ids" in model_inputs
            and isinstance(model_inputs["input_ids"], list)
            and all(x is None for x in model_inputs["input_ids"])
        ):
            model_inputs["input_ids"] = None

        if generate_kwargs is None:
            generate_kwargs = {}
        # FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
        # parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
        # the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
        # in the `_prepare_model_inputs` method.
        inputs = model_inputs.pop(self.model.main_input_name)
        model_outputs = self.model.generate(inputs, **model_inputs, **generate_kwargs)
        return model_outputs
    def postprocess(self, model_outputs):
        records = []
        for output_ids in model_outputs:
            record = {
                "generated_text": self.tokenizer.decode(
                    output_ids,
                    skip_special_tokens=True,
                )
            }
            records.append(record)
        return records
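

# Hypothetical usage sketch (added for illustration; the checkpoint name is an
# assumption): the class is normally instantiated through `transformers.pipeline`.
#
#   from transformers import pipeline
#   captioner = pipeline("image-to-text", model="ydshieh/vit-gpt2-coco-en")
#   captioner("http://images.cocodataset.org/val2017/000000039769.jpg")
#   # -> [{"generated_text": "two cats laying on a blanket"}]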
| 20 | 0 |