import json
import os
from functools import lru_cache
from typing import TYPE_CHECKING, List, Optional, Tuple

import regex as re

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging


if TYPE_CHECKING:
    from transformers.pipelines.conversational import Conversation

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
    "tokenizer_config_file": "tokenizer_config.json",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"},
    "merges_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"},
    "tokenizer_config_file": {
        "facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/blenderbot-3B": 128}


@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode():
    """
    Returns a mapping from utf-8 bytes to unicode strings. Reversible BPE codes work on unicode strings, so we avoid
    mapping to whitespace/control characters the BPE code barfs on.
    """
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))


def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (represented as a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs


class BlenderbotTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+")

    @property
    # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        """Tokenize a string."""
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) back to a single string."""
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        return token_ids_0 + [self.eos_token_id]

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        inputs = []
        for is_user, text in conversation.iter_texts():
            if is_user:
                # We need to space prefix as it's being done within blenderbot
                inputs.append(" " + text)
            else:
                # Generated responses should contain them already.
                inputs.append(text)

        full_string = " ".join(inputs)
        input_ids = self.encode(full_string)
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
            logger.warning(f"Trimmed input from conversation as it was longer than {self.model_max_length} tokens.")
        return input_ids
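
# Usage sketch (not part of the original module): loads the published checkpoint named
# in PRETRAINED_VOCAB_FILES_MAP above and round-trips a sentence. Requires network
# access to the Hugging Face Hub.
#
#     from transformers import BlenderbotTokenizer
#
#     tokenizer = BlenderbotTokenizer.from_pretrained("facebook/blenderbot-3B")
#     ids = tokenizer("Hello, how are you?").input_ids
#     print(tokenizer.decode(ids))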
import argparse

import torch

from transformers import GPT2Config, GPT2Model, load_tf_weights_in_gpt2
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging


logging.set_verbosity_info()


def convert_gpt2_checkpoint_to_pytorch(gpt2_checkpoint_path, gpt2_config_file, pytorch_dump_folder_path):
    # Construct model
    if gpt2_config_file == "":
        config = GPT2Config()
    else:
        config = GPT2Config.from_json_file(gpt2_config_file)
    model = GPT2Model(config)

    # Load weights from numpy
    load_tf_weights_in_gpt2(model, config, gpt2_checkpoint_path)

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--gpt2_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--gpt2_config_file",
        default="",
        type=str,
        help=(
            "An optional config json file corresponding to the pre-trained OpenAI model. \n"
            "This specifies the model architecture."
        ),
    )
    args = parser.parse_args()
    convert_gpt2_checkpoint_to_pytorch(args.gpt2_checkpoint_path, args.gpt2_config_file, args.pytorch_dump_folder_path)
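
# Example invocation (a sketch; both paths are placeholders for a real TensorFlow
# checkpoint and an output directory):
#
#     python convert_gpt2_original_tf_checkpoint_to_pytorch.py \
#         --gpt2_checkpoint_path /path/to/gpt2/model.ckpt \
#         --pytorch_dump_folder_path /path/to/output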
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

SPEECH_TO_TEXT_2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/s2t-wav2vec2-large-en-de": (
        "https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json"
    ),
    # See all Speech2Text models at https://huggingface.co/models?filter=speech2text2
}


class Speech2Text2Config(PretrainedConfig):
    model_type = "speech_to_text_2"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "decoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=10000,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=4,
        decoder_layerdrop=0.0,
        use_cache=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=2,
        scale_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        max_target_positions=1024,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = decoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_target_positions = max_target_positions

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
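
# Usage sketch (not part of the original module): instantiating the config with
# defaults and overriding a couple of decoder hyperparameters.
#
#     config = Speech2Text2Config(decoder_layers=4, d_model=128)
#     print(config.hidden_size)  # attribute_map routes "hidden_size" to d_model -> 128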
import argparse
import json
from pathlib import Path

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def create_rename_keys(config, has_lm_head=False, is_semantic=False):
    prefix = "backbone." if is_semantic else ""

    rename_keys = []
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"{prefix}blocks.{i}.norm1.weight", f"beit.encoder.layer.{i}.layernorm_before.weight"))
        rename_keys.append((f"{prefix}blocks.{i}.norm1.bias", f"beit.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append(
            (f"{prefix}blocks.{i}.attn.proj.weight", f"beit.encoder.layer.{i}.attention.output.dense.weight")
        )
        rename_keys.append(
            (f"{prefix}blocks.{i}.attn.proj.bias", f"beit.encoder.layer.{i}.attention.output.dense.bias")
        )
        rename_keys.append((f"{prefix}blocks.{i}.norm2.weight", f"beit.encoder.layer.{i}.layernorm_after.weight"))
        rename_keys.append((f"{prefix}blocks.{i}.norm2.bias", f"beit.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append((f"{prefix}blocks.{i}.mlp.fc1.weight", f"beit.encoder.layer.{i}.intermediate.dense.weight"))
        rename_keys.append((f"{prefix}blocks.{i}.mlp.fc1.bias", f"beit.encoder.layer.{i}.intermediate.dense.bias"))
        rename_keys.append((f"{prefix}blocks.{i}.mlp.fc2.weight", f"beit.encoder.layer.{i}.output.dense.weight"))
        rename_keys.append((f"{prefix}blocks.{i}.mlp.fc2.bias", f"beit.encoder.layer.{i}.output.dense.bias"))

    # projection layer + position embeddings
    rename_keys.extend(
        [
            (f"{prefix}cls_token", "beit.embeddings.cls_token"),
            (f"{prefix}patch_embed.proj.weight", "beit.embeddings.patch_embeddings.projection.weight"),
            (f"{prefix}patch_embed.proj.bias", "beit.embeddings.patch_embeddings.projection.bias"),
            (f"{prefix}pos_embed", "beit.embeddings.position_embeddings"),
        ]
    )

    if has_lm_head:
        # mask token + layernorm
        rename_keys.extend(
            [
                ("mask_token", "beit.embeddings.mask_token"),
                ("norm.weight", "layernorm.weight"),
                ("norm.bias", "layernorm.bias"),
            ]
        )
    else:
        # layernorm + classification head
        rename_keys.extend(
            [
                ("fc_norm.weight", "beit.pooler.layernorm.weight"),
                ("fc_norm.bias", "beit.pooler.layernorm.bias"),
                ("head.weight", "classifier.weight"),
                ("head.bias", "classifier.bias"),
            ]
        )

    return rename_keys


def read_in_q_k_v(state_dict, config, has_lm_head=False, is_semantic=False):
    for i in range(config.num_hidden_layers):
        prefix = "backbone." if is_semantic else ""
        # queries, keys and values
        in_proj_weight = state_dict.pop(f"{prefix}blocks.{i}.attn.qkv.weight")
        q_bias = state_dict.pop(f"{prefix}blocks.{i}.attn.q_bias")
        v_bias = state_dict.pop(f"{prefix}blocks.{i}.attn.v_bias")

        state_dict[f"beit.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.query.bias"] = q_bias
        state_dict[f"beit.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.value.bias"] = v_bias

        # gamma_1 and gamma_2
        # we call them lambda because otherwise they are renamed when using .from_pretrained
        gamma_1 = state_dict.pop(f"{prefix}blocks.{i}.gamma_1")
        gamma_2 = state_dict.pop(f"{prefix}blocks.{i}.gamma_2")
        state_dict[f"beit.encoder.layer.{i}.lambda_1"] = gamma_1
        state_dict[f"beit.encoder.layer.{i}.lambda_2"] = gamma_2


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_dit_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub=False):
    has_lm_head = False if "rvlcdip" in checkpoint_url else True
    config = BeitConfig(use_absolute_position_embeddings=True, use_mask_token=has_lm_head)

    # size of the architecture
    if "large" in checkpoint_url or "dit-l" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16

    # labels
    if "rvlcdip" in checkpoint_url:
        config.num_labels = 16
        repo_id = "huggingface/label-files"
        filename = "rvlcdip-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    # load state_dict of original model, remove and rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]

    rename_keys = create_rename_keys(config, has_lm_head=has_lm_head)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, has_lm_head=has_lm_head)

    # load HuggingFace model
    model = BeitForMaskedImageModeling(config) if has_lm_head else BeitForImageClassification(config)
    model.eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image
    image_processor = BeitImageProcessor(
        size=config.image_size, resample=PILImageResampling.BILINEAR, do_center_crop=False
    )
    image = prepare_img()

    encoding = image_processor(images=image, return_tensors="pt")
    pixel_values = encoding["pixel_values"]

    outputs = model(pixel_values)
    logits = outputs.logits

    # verify logits
    expected_shape = [1, 16] if "rvlcdip" in checkpoint_url else [1, 196, 8192]
    assert logits.shape == torch.Size(expected_shape), "Shape of logits not as expected"

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        if has_lm_head:
            model_name = "dit-base" if "base" in checkpoint_url else "dit-large"
        else:
            model_name = "dit-base-finetuned-rvlcdip" if "dit-b" in checkpoint_url else "dit-large-finetuned-rvlcdip"
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add image processor",
            use_temp_dir=True,
        )
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add model",
            use_temp_dir=True,
        )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--checkpoint_url",
        default="https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth",
        type=str,
        help="URL to the original PyTorch checkpoint (.pth file).",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
    )
    args = parser.parse_args()
    convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
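
# Example invocation (a sketch; the script filename is assumed and the output folder
# is a placeholder; the checkpoint URL defaults to the DiT-base file above):
#
#     python convert_dit_unilm_to_pytorch.py \
#         --pytorch_dump_folder_path /path/to/dit-base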
def apply_table(inp, table):
    """Permute the characters of `inp` according to `table` (1-indexed positions)."""
    res = ""
    for i in table:
        res += inp[i - 1]
    return res


def left_shift(data):
    """Circular left shift by one position."""
    return data[1:] + data[0]


def xor(a, b):
    """Bitwise XOR of two equal-length bit strings."""
    res = ""
    for i in range(len(a)):
        if a[i] == b[i]:
            res += "0"
        else:
            res += "1"
    return res


def apply_sbox(s, data):
    """Look up a 4-bit input in S-box `s`: outer bits pick the row, middle bits the column."""
    row = int("0b" + data[0] + data[-1], 2)
    col = int("0b" + data[1:3], 2)
    return bin(s[row][col])[2:]


def function(expansion, s0, s1, key, message):
    """One Feistel round of simplified DES (uses the global p4_table)."""
    left = message[:4]
    right = message[4:]
    temp = apply_table(right, expansion)
    temp = xor(temp, key)
    l = apply_sbox(s0, temp[:4])  # noqa: E741
    r = apply_sbox(s1, temp[4:])
    l = "0" * (2 - len(l)) + l  # noqa: E741
    r = "0" * (2 - len(r)) + r
    temp = apply_table(l + r, p4_table)
    temp = xor(left, temp)
    return temp + right


if __name__ == "__main__":
    key = input("Enter 10 bit key: ")
    message = input("Enter 8 bit message: ")

    p8_table = [6, 3, 7, 4, 8, 5, 10, 9]
    p10_table = [3, 5, 2, 7, 4, 10, 1, 9, 8, 6]
    p4_table = [2, 4, 3, 1]
    IP = [2, 6, 3, 1, 4, 8, 5, 7]
    IP_inv = [4, 1, 3, 5, 7, 2, 8, 6]
    expansion = [4, 1, 2, 3, 2, 3, 4, 1]
    s0 = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
    s1 = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]

    # key generation
    temp = apply_table(key, p10_table)
    left = temp[:5]
    right = temp[5:]
    left = left_shift(left)
    right = left_shift(right)
    key1 = apply_table(left + right, p8_table)
    left = left_shift(left)
    right = left_shift(right)
    left = left_shift(left)
    right = left_shift(right)
    key2 = apply_table(left + right, p8_table)

    # encryption
    temp = apply_table(message, IP)
    temp = function(expansion, s0, s1, key1, temp)
    temp = temp[4:] + temp[:4]  # swap halves between rounds
    temp = function(expansion, s0, s1, key2, temp)
    CT = apply_table(temp, IP_inv)
    print("Cipher text is:", CT)

    # decryption: same rounds with the subkeys applied in reverse order
    temp = apply_table(CT, IP)
    temp = function(expansion, s0, s1, key2, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, s0, s1, key1, temp)
    PT = apply_table(temp, IP_inv)
    print("Plain text after decrypting is:", PT)
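
# A small worked example of the permutation helper (independent of the interactive
# driver above): output position i takes the input character at 1-indexed position
# table[i].
#
#     >>> apply_table("0123456789", [3, 5, 2, 7, 4, 10, 1, 9, 8, 6])  # the P10 table
#     '2416390875'
#
# The same helper implements P10, P8, P4, IP and IP^-1; only the table changes.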
import torch
from torch import nn


class ProjectedAdaptiveLogSoftmax(nn.Module):
    def __init__(self, n_token, d_embed, d_proj, cutoffs, div_val=1, keep_order=False):
        super().__init__()

        self.n_token = n_token
        self.d_embed = d_embed
        self.d_proj = d_proj

        self.cutoffs = cutoffs + [n_token]
        self.cutoff_ends = [0] + self.cutoffs
        self.div_val = div_val

        self.shortlist_size = self.cutoffs[0]
        self.n_clusters = len(self.cutoffs) - 1
        self.head_size = self.shortlist_size + self.n_clusters

        if self.n_clusters > 0:
            self.cluster_weight = nn.Parameter(torch.zeros(self.n_clusters, self.d_embed))
            self.cluster_bias = nn.Parameter(torch.zeros(self.n_clusters))

        self.out_layers = nn.ModuleList()
        self.out_projs = nn.ParameterList()

        if div_val == 1:
            for i in range(len(self.cutoffs)):
                if d_proj != d_embed:
                    self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_embed)))
                else:
                    self.out_projs.append(None)

            self.out_layers.append(nn.Linear(d_embed, n_token))
        else:
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                d_emb_i = d_embed // (div_val**i)

                self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_emb_i)))
                self.out_layers.append(nn.Linear(d_emb_i, r_idx - l_idx))

        self.keep_order = keep_order

    def _compute_logit(self, hidden, weight, bias, proj):
        if proj is None:
            logit = nn.functional.linear(hidden, weight, bias=bias)
        else:
            # if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
            proj_hid = nn.functional.linear(hidden, proj.t().contiguous())
            logit = nn.functional.linear(proj_hid, weight, bias=bias)
            # else:
            #     logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
            #     if bias is not None:
            #         logit = logit + bias

        return logit

    def forward(self, hidden, labels=None, keep_order=False):
        """
        hidden :: [len*bsz x d_proj]; labels :: [len*bsz]. Returns negative log-likelihoods when
        labels are given, else log-probabilities over the full vocabulary.
        """
        if labels is not None:
            # Shift so that tokens < n predict n
            hidden = hidden[..., :-1, :].contiguous()
            labels = labels[..., 1:].contiguous()
            hidden = hidden.view(-1, hidden.size(-1))
            labels = labels.view(-1)
            if hidden.size(0) != labels.size(0):
                raise RuntimeError("Input and labels should have the same size in the batch dimension.")
        else:
            hidden = hidden.view(-1, hidden.size(-1))

        if self.n_clusters == 0:
            logit = self._compute_logit(hidden, self.out_layers[0].weight, self.out_layers[0].bias, self.out_projs[0])
            if labels is not None:
                mask = labels != -100
                out = torch.zeros_like(labels, dtype=hidden.dtype, device=hidden.device)
                out[mask] = (
                    -nn.functional.log_softmax(logit, dim=-1)[mask].gather(1, labels[mask].unsqueeze(1)).squeeze(1)
                )
            else:
                out = nn.functional.log_softmax(logit, dim=-1)
        else:
            # construct weights and biases
            weights, biases = [], []
            for i in range(len(self.cutoffs)):
                if self.div_val == 1:
                    l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    weight_i = self.out_layers[0].weight[l_idx:r_idx]
                    bias_i = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    weight_i = self.out_layers[i].weight
                    bias_i = self.out_layers[i].bias

                if i == 0:
                    weight_i = torch.cat([weight_i, self.cluster_weight], dim=0)
                    bias_i = torch.cat([bias_i, self.cluster_bias], dim=0)

                weights.append(weight_i)
                biases.append(bias_i)

            head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0]

            head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj)
            head_logprob = nn.functional.log_softmax(head_logit, dim=1)

            if labels is None:
                out = hidden.new_empty((head_logit.size(0), self.n_token))
            else:
                out = torch.zeros_like(labels, dtype=hidden.dtype, device=hidden.device)

            offset = 0
            cutoff_values = [0] + self.cutoffs
            for i in range(len(cutoff_values) - 1):
                l_idx, r_idx = cutoff_values[i], cutoff_values[i + 1]

                if labels is not None:
                    mask_i = (labels >= l_idx) & (labels < r_idx)
                    indices_i = mask_i.nonzero().squeeze()

                    if indices_i.numel() == 0:
                        continue

                    target_i = labels.index_select(0, indices_i) - l_idx
                    head_logprob_i = head_logprob.index_select(0, indices_i)
                    hidden_i = hidden.index_select(0, indices_i)
                else:
                    hidden_i = hidden

                if i == 0:
                    if labels is not None:
                        logprob_i = head_logprob_i.gather(1, target_i[:, None]).squeeze(1)
                    else:
                        out[:, : self.cutoffs[0]] = head_logprob[:, : self.cutoffs[0]]
                else:
                    weight_i, bias_i, proj_i = weights[i], biases[i], self.out_projs[i]

                    tail_logit_i = self._compute_logit(hidden_i, weight_i, bias_i, proj_i)
                    tail_logprob_i = nn.functional.log_softmax(tail_logit_i, dim=1)
                    cluster_prob_idx = self.cutoffs[0] + i - 1  # No probability for the head cluster
                    if labels is not None:
                        logprob_i = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
                            1, target_i[:, None]
                        ).squeeze(1)
                    else:
                        logprob_i = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
                        out[:, l_idx:r_idx] = logprob_i

                if labels is not None:
                    if (hasattr(self, "keep_order") and self.keep_order) or keep_order:
                        out.index_copy_(0, indices_i, -logprob_i)
                    else:
                        out[offset : offset + logprob_i.size(0)].copy_(-logprob_i)
                    offset += logprob_i.size(0)

        return out

    def log_prob(self, hidden):
        """Compute log-probabilities over the full vocabulary for each row of `hidden`."""
        if self.n_clusters == 0:
            logit = self._compute_logit(hidden, self.out_layers[0].weight, self.out_layers[0].bias, self.out_projs[0])
            return nn.functional.log_softmax(logit, dim=-1)
        else:
            # construct weights and biases
            weights, biases = [], []
            for i in range(len(self.cutoffs)):
                if self.div_val == 1:
                    l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    weight_i = self.out_layers[0].weight[l_idx:r_idx]
                    bias_i = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    weight_i = self.out_layers[i].weight
                    bias_i = self.out_layers[i].bias

                if i == 0:
                    weight_i = torch.cat([weight_i, self.cluster_weight], dim=0)
                    bias_i = torch.cat([bias_i, self.cluster_bias], dim=0)

                weights.append(weight_i)
                biases.append(bias_i)

            head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0]
            head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj)

            out = hidden.new_empty((head_logit.size(0), self.n_token))
            head_logprob = nn.functional.log_softmax(head_logit, dim=1)

            cutoff_values = [0] + self.cutoffs
            for i in range(len(cutoff_values) - 1):
                start_idx, stop_idx = cutoff_values[i], cutoff_values[i + 1]

                if i == 0:
                    out[:, : self.cutoffs[0]] = head_logprob[:, : self.cutoffs[0]]
                else:
                    weight_i, bias_i, proj_i = weights[i], biases[i], self.out_projs[i]
                    tail_logit_i = self._compute_logit(hidden, weight_i, bias_i, proj_i)
                    tail_logprob_i = nn.functional.log_softmax(tail_logit_i, dim=1)

                    logprob_i = head_logprob[:, -i] + tail_logprob_i
                    out[:, start_idx:stop_idx] = logprob_i

            return out
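
# Usage sketch (not part of the original module; the dimensions are made up for
# illustration). With div_val=1 and d_proj == d_embed, every projection entry is None
# and _compute_logit falls back to a plain linear layer.
#
#     softmax = ProjectedAdaptiveLogSoftmax(n_token=1000, d_embed=32, d_proj=32, cutoffs=[100, 500])
#     hidden = torch.randn(8, 32)           # [batch, d_proj]
#     log_probs = softmax.log_prob(hidden)  # [batch, n_token]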
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory

from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
    convert,
    ensure_valid_input,
    generate_identified_filename,
    infer_shapes,
    quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow


class FuncContiguousArgs:
    def forward(self, input_ids, token_type_ids, attention_mask):
        return None


class FuncNonContiguousArgs:
    def forward(self, input_ids, some_other_args, token_type_ids, attention_mask):
        return None


class OnnxExportTestCase(unittest.TestCase):
    MODEL_TO_TEST = [
        # (model_name, model_kwargs)
        ("bert-base-cased", {}),
        ("gpt2", {"use_cache": False}),  # We don't support exporting GPT2 past keys anymore
    ]

    @require_tf
    @slow
    def test_export_tensorflow(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(model, "tf", 12, **model_kwargs)

    @require_torch
    @slow
    def test_export_pytorch(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(model, "pt", 12, **model_kwargs)

    @require_torch
    @slow
    def test_export_custom_bert_model(self):
        from transformers import BertModel

        vocab = ["[UNK]", "[SEP]", "[CLS]", "[PAD]", "[MASK]", "some", "other", "words"]
        with NamedTemporaryFile(mode="w+t") as vocab_file:
            vocab_file.write("\n".join(vocab))
            vocab_file.flush()
            tokenizer = BertTokenizerFast(vocab_file.name)

        with TemporaryDirectory() as bert_save_dir:
            model = BertModel(BertConfig(vocab_size=len(vocab)))
            model.save_pretrained(bert_save_dir)
            self._test_export(bert_save_dir, "pt", 12, tokenizer)

    @require_tf
    @slow
    def test_quantize_tf(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            path = self._test_export(model, "tf", 12, **model_kwargs)
            quantized_path = quantize(Path(path))

            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(path).stat().st_size:
                self.fail("Quantized model is bigger than initial ONNX model")

    @require_torch
    @slow
    def test_quantize_pytorch(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            path = self._test_export(model, "pt", 12, **model_kwargs)
            quantized_path = quantize(path)

            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(path).stat().st_size:
                self.fail("Quantized model is bigger than initial ONNX model")

    def _test_export(self, model, framework, opset, tokenizer=None, **model_kwargs):
        try:
            # Compute path
            with TemporaryDirectory() as tempdir:
                path = Path(tempdir).joinpath("model.onnx")

            # Remove folder if exists
            if path.parent.exists():
                path.parent.rmdir()

            # Export
            convert(framework, model, path, opset, tokenizer, **model_kwargs)

            return path
        except Exception as e:
            self.fail(e)

    @require_torch
    @require_tokenizers
    @slow
    def test_infer_dynamic_axis_pytorch(self):
        from transformers import BertModel

        model = BertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random"))
        tokenizer = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random")
        self._test_infer_dynamic_axis(model, tokenizer, "pt")

    @require_tf
    @require_tokenizers
    @slow
    def test_infer_dynamic_axis_tf(self):
        from transformers import TFBertModel

        model = TFBertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random"))
        tokenizer = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random")
        self._test_infer_dynamic_axis(model, tokenizer, "tf")

    def _test_infer_dynamic_axis(self, model, tokenizer, framework):
        nlp = FeatureExtractionPipeline(model, tokenizer)

        variable_names = ["input_ids", "token_type_ids", "attention_mask", "output_0", "output_1"]
        input_vars, output_vars, shapes, tokens = infer_shapes(nlp, framework)

        # Assert all variables are present
        self.assertEqual(len(shapes), len(variable_names))
        self.assertTrue(all(var_name in shapes for var_name in variable_names))
        self.assertSequenceEqual(variable_names[:3], input_vars)
        self.assertSequenceEqual(variable_names[3:], output_vars)

        # Assert inputs are {0: batch, 1: sequence}
        for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
            self.assertDictEqual(shapes[var_name], {0: "batch", 1: "sequence"})

        # Assert outputs are {0: batch, 1: sequence} and {0: batch}
        self.assertDictEqual(shapes["output_0"], {0: "batch", 1: "sequence"})
        self.assertDictEqual(shapes["output_1"], {0: "batch"})

    def test_ensure_valid_input(self):
        # All generated args are valid
        input_names = ["input_ids", "attention_mask", "token_type_ids"]
        tokens = {"input_ids": [1, 2, 3, 4], "attention_mask": [0, 0, 0, 0], "token_type_ids": [1, 1, 1, 1]}
        ordered_input_names, inputs_args = ensure_valid_input(FuncContiguousArgs(), tokens, input_names)

        # Should have exactly the same number of args (all are valid)
        self.assertEqual(len(inputs_args), 3)

        # Should have exactly the same input names
        self.assertEqual(set(ordered_input_names), set(input_names))

        # Parameter should be reordered according to their respective place in the function:
        # (input_ids, token_type_ids, attention_mask)
        self.assertEqual(inputs_args, (tokens["input_ids"], tokens["token_type_ids"], tokens["attention_mask"]))

        # Generated args are interleaved with another args (for instance parameter "past" in GPT2)
        ordered_input_names, inputs_args = ensure_valid_input(FuncNonContiguousArgs(), tokens, input_names)

        # Should have exactly the one arg (all before the one not provided "some_other_args")
        self.assertEqual(len(inputs_args), 1)
        self.assertEqual(len(ordered_input_names), 1)

        # Should have only "input_ids"
        self.assertEqual(inputs_args[0], tokens["input_ids"])
        self.assertEqual(ordered_input_names[0], "input_ids")

    def test_generate_identified_name(self):
        generated = generate_identified_filename(Path("/home/something/my_fake_model.onnx"), "-test")
        self.assertEqual("/home/something/my_fake_model-test.onnx", generated.as_posix())
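
# The module under test also exposes a CLI; an invocation sketch (flag names assumed
# from transformers.convert_graph_to_onnx's argument parser, output path is a
# placeholder):
#
#     python -m transformers.convert_graph_to_onnx --framework pt \
#         --model bert-base-cased /tmp/bert-base-cased.onnx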
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
'The `inpainting.py` script is outdated. Please use directly `from diffusers import'
' StableDiffusionInpaintPipeline` instead.'
)
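
# Recommended replacement (a sketch; the checkpoint name, `init_image` and `mask` are
# illustrative placeholders):
#
#     from diffusers import StableDiffusionInpaintPipeline
#
#     pipe = StableDiffusionInpaintPipeline.from_pretrained("runwayml/stable-diffusion-inpainting")
#     image = pipe(prompt="a cat", image=init_image, mask_image=mask).images[0]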
import json
import os
from collections import Counter

import torch
import torchvision
import torchvision.transforms as transforms
from PIL import Image
from torch import nn
from torch.utils.data import Dataset


POOLING_BREAKDOWN = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)}


class ImageEncoder(nn.Module):
    def __init__(self, args):
        super().__init__()
        model = torchvision.models.resnet152(pretrained=True)
        modules = list(model.children())[:-2]
        self.model = nn.Sequential(*modules)
        self.pool = nn.AdaptiveAvgPool2d(POOLING_BREAKDOWN[args.num_image_embeds])

    def forward(self, x):
        # Bx3x224x224 -> Bx2048x7x7 -> Bx2048xN -> BxNx2048
        out = self.pool(self.model(x))
        out = torch.flatten(out, start_dim=2)
        out = out.transpose(1, 2).contiguous()
        return out  # BxNx2048


class JsonlDataset(Dataset):
    def __init__(self, data_path, tokenizer, transforms, labels, max_seq_length):
        self.data = [json.loads(line) for line in open(data_path)]
        self.data_dir = os.path.dirname(data_path)
        self.tokenizer = tokenizer
        self.labels = labels
        self.n_classes = len(labels)
        self.max_seq_length = max_seq_length

        self.transforms = transforms

    def __len__(self):
        return len(self.data)

    def __getitem__(self, index):
        sentence = torch.LongTensor(self.tokenizer.encode(self.data[index]["text"], add_special_tokens=True))
        start_token, sentence, end_token = sentence[0], sentence[1:-1], sentence[-1]
        sentence = sentence[: self.max_seq_length]

        label = torch.zeros(self.n_classes)
        label[[self.labels.index(tgt) for tgt in self.data[index]["label"]]] = 1

        image = Image.open(os.path.join(self.data_dir, self.data[index]["img"])).convert("RGB")
        image = self.transforms(image)

        return {
            "image_start_token": start_token,
            "image_end_token": end_token,
            "sentence": sentence,
            "image": image,
            "label": label,
        }

    def get_label_frequencies(self):
        label_freqs = Counter()
        for row in self.data:
            label_freqs.update(row["label"])
        return label_freqs


def collate_fn(batch):
    lens = [len(row["sentence"]) for row in batch]
    bsz, max_seq_len = len(batch), max(lens)

    mask_tensor = torch.zeros(bsz, max_seq_len, dtype=torch.long)
    text_tensor = torch.zeros(bsz, max_seq_len, dtype=torch.long)

    for i_batch, (input_row, length) in enumerate(zip(batch, lens)):
        text_tensor[i_batch, :length] = input_row["sentence"]
        mask_tensor[i_batch, :length] = 1

    img_tensor = torch.stack([row["image"] for row in batch])
    tgt_tensor = torch.stack([row["label"] for row in batch])
    img_start_token = torch.stack([row["image_start_token"] for row in batch])
    img_end_token = torch.stack([row["image_end_token"] for row in batch])

    return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor


def get_mmimdb_labels():
    return [
        "Crime",
        "Drama",
        "Thriller",
        "Action",
        "Comedy",
        "Romance",
        "Documentary",
        "Short",
        "Mystery",
        "History",
        "Family",
        "Adventure",
        "Fantasy",
        "Sci-Fi",
        "Western",
        "Horror",
        "Sport",
        "War",
        "Music",
        "Musical",
        "Animation",
        "Biography",
        "Film-Noir",
    ]


def get_image_transforms():
    return transforms.Compose(
        [
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(
                mean=[0.46777044, 0.44531429, 0.40661017],
                std=[0.12221994, 0.12145835, 0.14380469],
            ),
        ]
    )
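
# Usage sketch (not part of the original module): wiring the pieces into a DataLoader.
# `tokenizer` is any HF tokenizer and "train.jsonl" is a placeholder path whose rows
# carry the "text", "img" and "label" fields that __getitem__ above expects.
#
#     from torch.utils.data import DataLoader
#
#     labels = get_mmimdb_labels()
#     dataset = JsonlDataset("train.jsonl", tokenizer, get_image_transforms(), labels, max_seq_length=512)
#     loader = DataLoader(dataset, batch_size=8, collate_fn=collate_fn)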
from typing import List

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "snap-research/efficientformer-l1-300": (
        "https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json"
    ),
}


class EfficientFormerConfig(PretrainedConfig):
    model_type = "efficientformer"

    def __init__(
        self,
        depths: List[int] = [3, 2, 6, 4],
        hidden_sizes: List[int] = [48, 96, 224, 448],
        downsamples: List[bool] = [True, True, True, True],
        dim: int = 448,
        key_dim: int = 32,
        attention_ratio: int = 4,
        resolution: int = 7,
        num_hidden_layers: int = 5,
        num_attention_heads: int = 8,
        mlp_expansion_ratio: int = 4,
        hidden_dropout_prob: float = 0.0,
        patch_size: int = 16,
        num_channels: int = 3,
        pool_size: int = 3,
        downsample_patch_size: int = 3,
        downsample_stride: int = 2,
        downsample_pad: int = 1,
        drop_path_rate: float = 0.0,
        num_meta3d_blocks: int = 1,
        distillation: bool = True,
        use_layer_scale: bool = True,
        layer_scale_init_value: float = 1e-5,
        hidden_act: str = "gelu",
        initializer_range: float = 0.02,
        layer_norm_eps: float = 1e-12,
        image_size: int = 224,
        batch_norm_eps: float = 1e-05,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)

        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.hidden_sizes = hidden_sizes
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.depths = depths
        self.mlp_expansion_ratio = mlp_expansion_ratio
        self.downsamples = downsamples
        self.dim = dim
        self.key_dim = key_dim
        self.attention_ratio = attention_ratio
        self.resolution = resolution
        self.pool_size = pool_size
        self.downsample_patch_size = downsample_patch_size
        self.downsample_stride = downsample_stride
        self.downsample_pad = downsample_pad
        self.drop_path_rate = drop_path_rate
        self.num_meta3d_blocks = num_meta3d_blocks
        self.distillation = distillation
        self.use_layer_scale = use_layer_scale
        self.layer_scale_init_value = layer_scale_init_value
        self.image_size = image_size
        self.batch_norm_eps = batch_norm_eps
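
# Usage sketch (not part of the original module): the defaults describe the four-stage
# EfficientFormer layout; individual stages can be overridden per argument.
#
#     config = EfficientFormerConfig(depths=[3, 2, 6, 4], hidden_sizes=[48, 96, 224, 448])
#     print(config.model_type)  # "efficientformer"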
"""simple docstring"""
def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = int(_UpperCamelCase )
# Initialize Result
__lowerCAmelCase = []
# Traverse through all denomination
for denomination in reversed(_UpperCamelCase ):
# Find denominations
while int(_UpperCamelCase ) >= int(_UpperCamelCase ):
total_value -= int(_UpperCamelCase )
answer.append(_UpperCamelCase ) # Append the "answers" array
return answer
# Driver Code
if __name__ == "__main__":
A : Union[str, Any] = []
A : Optional[int] = "0"
if (
input("Do you want to enter your denominations ? (yY/n): ").strip().lower()
== "y"
):
A : Union[str, Any] = int(input("Enter the number of denominations you want to add: ").strip())
for i in range(0, n):
denominations.append(int(input(f'''Denomination {i}: ''').strip()))
A : List[Any] = input("Enter the change you want to make in Indian Currency: ").strip()
else:
# All denominations of Indian Currency if user does not enter
A : Optional[Any] = [1, 2, 5, 1_0, 2_0, 5_0, 1_0_0, 5_0_0, 2_0_0_0]
A : Optional[int] = input("Enter the change you want to make: ").strip()
if int(value) == 0 or int(value) < 0:
print("The total value cannot be zero or negative.")
else:
print(f'''Following is minimal change for {value}: ''')
A : Optional[Any] = find_minimum_change(denominations, value)
# Print result
for i in range(len(answer)):
print(answer[i], end=" ")
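
# A worked example (non-interactive): with the default Indian denominations,
# 987 = 500 + 100 + 100 + 100 + 100 + 50 + 20 + 10 + 5 + 2.
#
#     >>> find_minimum_change([1, 2, 5, 10, 20, 50, 100, 500, 2000], "987")
#     [500, 100, 100, 100, 100, 50, 20, 10, 5, 2]
#
# The greedy strategy is optimal for canonical coin systems like this one, but not for
# arbitrary denominations: with coins [1, 3, 4], greedy makes 6 as 4 + 1 + 1 (three
# coins) instead of 3 + 3 (two coins).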
"""simple docstring"""
def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = len(_UpperCamelCase )
print("The following activities are selected:" )
# The first activity is always selected
__lowerCAmelCase = 0
print(_UpperCamelCase , end="," )
# Consider rest of the activities
for j in range(_UpperCamelCase ):
# If this activity has start time greater than
# or equal to the finish time of previously
# selected activity, then select it
if start[j] >= finish[i]:
print(_UpperCamelCase , end="," )
__lowerCAmelCase = j
if __name__ == "__main__":
import doctest
doctest.testmod()
A : Dict = [1, 3, 0, 5, 8, 5]
A : int = [2, 4, 6, 7, 9, 9]
print_max_activities(start, finish)
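
# Worked trace for the driver data above (activities indexed 0..5, sorted by finish):
# select 0 (finish 2); start[1]=3 >= 2 -> select 1 (finish 4); start[2]=0 < 4 -> skip;
# start[3]=5 >= 4 -> select 3 (finish 7); start[4]=8 >= 7 -> select 4 (finish 9);
# start[5]=5 < 9 -> skip. Printed output: 0,1,3,4,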
import argparse
from collections import OrderedDict
from pathlib import Path

import torch

from transformers import (
    VisualBertConfig,
    VisualBertForMultipleChoice,
    VisualBertForPreTraining,
    VisualBertForQuestionAnswering,
    VisualBertForVisualReasoning,
)
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

rename_keys_prefix = [
    ("bert.bert", "visual_bert"),
    ("bert.cls", "cls"),
    ("bert.classifier", "cls"),
    ("token_type_embeddings_visual", "visual_token_type_embeddings"),
    ("position_embeddings_visual", "visual_position_embeddings"),
    ("projection", "visual_projection"),
]

ACCEPTABLE_CHECKPOINTS = [
    "nlvr2_coco_pre_trained.th",
    "nlvr2_fine_tuned.th",
    "nlvr2_pre_trained.th",
    "vcr_coco_pre_train.th",
    "vcr_fine_tune.th",
    "vcr_pre_train.th",
    "vqa_coco_pre_trained.th",
    "vqa_fine_tuned.th",
    "vqa_pre_trained.th",
]


def load_state_dict(checkpoint_path):
    sd = torch.load(checkpoint_path, map_location="cpu")
    return sd


def get_new_dict(d, config, rename_keys_prefix=rename_keys_prefix):
    new_d = OrderedDict()
    new_d["visual_bert.embeddings.position_ids"] = torch.arange(config.max_position_embeddings).expand((1, -1))
    # detector_d = OrderedDict()
    for key in d:
        if "detector" in key:
            # detector_d[key.replace('detector.','')] = d[key]
            continue
        new_key = key
        for name_pair in rename_keys_prefix:
            new_key = new_key.replace(name_pair[0], name_pair[1])
        new_d[new_key] = d[key]
        if key == "bert.cls.predictions.decoder.weight":
            # Old bert code didn't have `decoder.bias`, but was added separately
            new_d["cls.predictions.decoder.bias"] = new_d["cls.predictions.bias"]
    return new_d


@torch.no_grad()
def convert_visual_bert_checkpoint(checkpoint_path, pytorch_dump_folder_path):
    """
    Copy/paste/tweak the original checkpoint's weights into our VisualBERT structure.
    """
    assert (
        checkpoint_path.split("/")[-1] in ACCEPTABLE_CHECKPOINTS
    ), f"The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}."

    # Get Config
    if "pre" in checkpoint_path:
        model_type = "pretraining"
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 512}
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
        elif "nlvr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 1024}
        else:
            raise NotImplementedError(f"No implementation found for `{checkpoint_path}`.")
    else:
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 512}
            model_type = "multichoice"
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
            model_type = "vqa_advanced"
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048, "num_labels": 3129}
            model_type = "vqa"
        elif "nlvr" in checkpoint_path:
            config_params = {
                "visual_embedding_dim": 1024,
                "num_labels": 2,
            }
            model_type = "nlvr"

    config = VisualBertConfig(**config_params)

    # Load State Dict
    state_dict = load_state_dict(checkpoint_path)

    new_state_dict = get_new_dict(state_dict, config)

    if model_type == "pretraining":
        model = VisualBertForPreTraining(config)
    elif model_type == "vqa":
        model = VisualBertForQuestionAnswering(config)
    elif model_type == "nlvr":
        model = VisualBertForVisualReasoning(config)
    elif model_type == "multichoice":
        model = VisualBertForMultipleChoice(config)

    model.load_state_dict(new_state_dict)

    # Save Checkpoints
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("orig_checkpoint_path", type=str, help="A path to .th on local filesystem.")
    parser.add_argument("pytorch_dump_folder_path", type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
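
# Example invocation (a sketch; the checkpoint filename must be one of
# ACCEPTABLE_CHECKPOINTS and the output folder is a placeholder):
#
#     python convert_visual_bert_original_pytorch_checkpoint_to_pytorch.py \
#         nlvr2_fine_tuned.th /path/to/visualbert-nlvr2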
from __future__ import annotations


def allocation_num(number_of_bytes: int, partitions: int) -> list[str]:
    """Divide `number_of_bytes` into `partitions` contiguous, 1-indexed byte ranges."""
    if partitions <= 0:
        raise ValueError("partitions must be a positive number!")
    if partitions > number_of_bytes:
        raise ValueError("partitions can not > number_of_bytes!")
    bytes_per_partition = number_of_bytes // partitions
    allocation_list = []
    for i in range(partitions):
        start_bytes = i * bytes_per_partition + 1
        end_bytes = (
            number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition
        )
        allocation_list.append(f"{start_bytes}-{end_bytes}")
    return allocation_list


if __name__ == "__main__":
    import doctest

    doctest.testmod()
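
# Worked examples: the last partition absorbs the remainder of the integer division.
#
#     >>> allocation_num(100, 4)
#     ['1-25', '26-50', '51-75', '76-100']
#     >>> allocation_num(10, 3)
#     ['1-3', '4-6', '7-10']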
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
__A =get_tests_dir('''fixtures/test_sentencepiece.model''')
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
__A =2_5_0_0_0_4
__A =2_5_0_0_2_0
@require_sentencepiece
@require_tokenizers
class _SCREAMING_SNAKE_CASE ( snake_case_ , unittest.TestCase ):
lowerCAmelCase__ = MBartTokenizer
lowerCAmelCase__ = MBartTokenizerFast
lowerCAmelCase__ = True
lowerCAmelCase__ = True
def SCREAMING_SNAKE_CASE_( self ) -> int:
super().setUp()
# We have a SentencePiece fixture for testing
lowerCamelCase_ = MBartTokenizer(lowercase , keep_accents=lowercase )
tokenizer.save_pretrained(self.tmpdirname )
def SCREAMING_SNAKE_CASE_( self ) -> Union[str, Any]:
lowerCamelCase_ = MBartTokenizer(lowercase , keep_accents=lowercase )
lowerCamelCase_ = tokenizer.tokenize("This is a test" )
self.assertListEqual(lowercase , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowercase ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
lowerCamelCase_ = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
lowercase , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
lowerCamelCase_ = tokenizer.convert_tokens_to_ids(lowercase )
self.assertListEqual(
lowercase , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
lowerCamelCase_ = tokenizer.convert_ids_to_tokens(lowercase )
self.assertListEqual(
lowercase , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
def SCREAMING_SNAKE_CASE_( self ) -> Any:
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
lowerCamelCase_ = (self.rust_tokenizer_class, "hf-internal-testing/tiny-random-mbart", {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
lowerCamelCase_ = self.rust_tokenizer_class.from_pretrained(lowercase , **lowercase )
lowerCamelCase_ = self.tokenizer_class.from_pretrained(lowercase , **lowercase )
lowerCamelCase_ = tempfile.mkdtemp()
lowerCamelCase_ = tokenizer_r.save_pretrained(lowercase )
lowerCamelCase_ = tokenizer_p.save_pretrained(lowercase )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files ) )
lowerCamelCase_ = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f )
self.assertSequenceEqual(lowercase , lowercase )
# Checks everything loads correctly in the same way
lowerCamelCase_ = tokenizer_r.from_pretrained(lowercase )
lowerCamelCase_ = tokenizer_p.from_pretrained(lowercase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowercase , lowercase ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(lowercase )
# Save tokenizer rust, legacy_format=True
lowerCamelCase_ = tempfile.mkdtemp()
lowerCamelCase_ = tokenizer_r.save_pretrained(lowercase , legacy_format=lowercase )
lowerCamelCase_ = tokenizer_p.save_pretrained(lowercase )
# Checks it save with the same files
self.assertSequenceEqual(lowercase , lowercase )
# Checks everything loads correctly in the same way
lowerCamelCase_ = tokenizer_r.from_pretrained(lowercase )
lowerCamelCase_ = tokenizer_p.from_pretrained(lowercase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowercase , lowercase ) )
shutil.rmtree(lowercase )
# Save tokenizer rust, legacy_format=False
lowerCamelCase_ = tempfile.mkdtemp()
lowerCamelCase_ = tokenizer_r.save_pretrained(lowercase , legacy_format=lowercase )
lowerCamelCase_ = tokenizer_p.save_pretrained(lowercase )
# Checks it saved the tokenizer.json file
self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
lowerCamelCase_ = tokenizer_r.from_pretrained(lowercase )
lowerCamelCase_ = tokenizer_p.from_pretrained(lowercase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowercase , lowercase ) )
shutil.rmtree(lowercase )
@require_torch
@require_sentencepiece
@require_tokenizers
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
lowerCAmelCase__ = 'facebook/mbart-large-en-ro'
lowerCAmelCase__ = [
' UN Chief Says There Is No Military Solution in Syria',
' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.',
]
lowerCAmelCase__ = [
'Şeful ONU declară că nu există o soluţie militară în Siria',
'Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei'
' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'
' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.',
]
lowerCAmelCase__ = [82_74, 12_78_73, 2_59_16, 7, 86_22, 20_71, 4_38, 6_74_85, 53, 18_78_95, 23, 5_17_12, 2, EN_CODE]
@classmethod
def SCREAMING_SNAKE_CASE_( cls ) -> Optional[Any]:
lowerCamelCase_ = MBartTokenizer.from_pretrained(
cls.checkpoint_name , src_lang="en_XX" , tgt_lang="ro_RO" )
lowerCamelCase_ = 1
return cls
    def check_language_codes(self):
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ar_AR"], 250001)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["en_EN"], 250004)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ro_RO"], 250020)

    def test_enro_tokenizer_batch_encode_plus(self):
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)

    def test_enro_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(RO_CODE, self.tokenizer.all_special_ids)
        generated_ids = [RO_CODE, 884, 9019, 96, 9, 916, 86792, 36, 18743, 15596, 5, 2]
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_romanian = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_romanian)
        self.assertNotIn(self.tokenizer.eos_token, result)

    def test_enro_tokenizer_truncation(self):
        src_text = ["this is gunna be a long sentence " * 20]
        assert isinstance(src_text[0], str)
        desired_max_length = 10
        ids = self.tokenizer(src_text, max_length=desired_max_length, truncation=True).input_ids[0]
        self.assertEqual(ids[-2], 2)
        self.assertEqual(ids[-1], EN_CODE)
        self.assertEqual(len(ids), desired_max_length)

    def test_mask_token(self):
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "ar_AR"]), [250026, 250001])

    def test_special_tokens_unaffected_by_save_load(self):
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = MBartTokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids, original_special_tokens)
    @require_torch
    def test_batch_fairseq_parity(self):
        batch = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=True, return_tensors="pt")
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)
        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE]
        assert batch.decoder_input_ids[1][0].tolist() == RO_CODE
        assert batch.decoder_input_ids[1][-1] == 2
        assert batch.labels[1][-2:].tolist() == [2, RO_CODE]

    @require_torch
    def test_enro_tokenizer_prepare_batch(self):
        batch = self.tokenizer(
            self.src_text,
            text_target=self.tgt_text,
            padding=True,
            truncation=True,
            max_length=len(self.expected_src_tokens),
            return_tensors="pt",
        )
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)
        self.assertIsInstance(batch, BatchEncoding)
        self.assertEqual((2, 14), batch.input_ids.shape)
        self.assertEqual((2, 14), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens, result)
        self.assertEqual(2, batch.decoder_input_ids[0, -1])  # EOS
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens, [])
        self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id, EN_CODE])

    def test_seq2seq_max_length(self):
        batch = self.tokenizer(self.src_text, padding=True, truncation=True, max_length=3, return_tensors="pt")
        targets = self.tokenizer(
            text_target=self.tgt_text, padding=True, truncation=True, max_length=10, return_tensors="pt"
        )
        labels = targets["input_ids"]
        batch["decoder_input_ids"] = shift_tokens_right(labels, self.tokenizer.pad_token_id)
        self.assertEqual(batch.input_ids.shape[1], 3)
        self.assertEqual(batch.decoder_input_ids.shape[1], 10)

    @require_torch
    def test_tokenizer_translation(self):
        inputs = self.tokenizer._build_translation_inputs(
            "A test", return_tensors="pt", src_lang="en_XX", tgt_lang="ar_AR"
        )
        self.assertEqual(
            nested_simplify(inputs),
            {
                # A, test, EOS, en_XX
                "input_ids": [[62, 3034, 2, 250004]],
                "attention_mask": [[1, 1, 1, 1]],
                # ar_AR
                "forced_bos_token_id": 250001,
            },
        )
| 313 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_center_crop=True, crop_size=None, do_flip_channel_order=True):
        size = size if size is not None else {"shortest_edge": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_flip_channel_order = do_flip_channel_order

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_flip_channel_order": self.do_flip_channel_order,
        }
@require_torch
@require_vision
class MobileViTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = MobileViTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = MobileViTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_flip_channel_order"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 20})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        # placeholder kept from the original suite; the method name is assumed
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
| 313 | 1 |
import logging
import os
import sys
from dataclasses import dataclass, field
from importlib import import_module
from typing import Dict, List, Optional, Tuple
import numpy as np
from seqeval.metrics import accuracy_score, f1_score, precision_score, recall_score
from torch import nn
from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask
import transformers
from transformers import (
AutoConfig,
AutoModelForTokenClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    task_type: Optional[str] = field(
        default="NER", metadata={"help": "Task type to fine tune in training (e.g. NER, POS, etc)"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    use_fast: bool = field(default=False, metadata={"help": "Set this flag to use fast tokenization."})
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task."}
    )
    labels: Optional[str] = field(
        default=None,
        metadata={"help": "Path to a file containing all labels. If not specified, CoNLL-2003 labels are used."},
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome."
        )

    module = import_module("tasks")
    try:
        token_classification_task_clazz = getattr(module, model_args.task_type)
        token_classification_task = token_classification_task_clazz()
    except AttributeError:
        raise ValueError(
            f"Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. "
            f"Available tasks classes are: {TokenClassificationTask.__subclasses__()}"
        )
    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.local_rank != -1),
        training_args.fp16,
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed
    set_seed(training_args.seed)
    # Prepare CONLL-2003 task
    labels = token_classification_task.get_labels(data_args.labels)
    label_map = dict(enumerate(labels))
    num_labels = len(labels)

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        id2label=label_map,
        label2id={label: i for i, label in enumerate(labels)},
        cache_dir=model_args.cache_dir,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast,
    )
    model = AutoModelForTokenClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
    )
    # Get datasets
    train_dataset = (
        TokenClassificationDataset(
            token_classification_task=token_classification_task,
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            labels=labels,
            model_type=config.model_type,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.train,
        )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        TokenClassificationDataset(
            token_classification_task=token_classification_task,
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            labels=labels,
            model_type=config.model_type,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.dev,
        )
        if training_args.do_eval
        else None
    )
    def align_predictions(predictions: np.ndarray, label_ids: np.ndarray) -> Tuple[List[int], List[int]]:
        preds = np.argmax(predictions, axis=2)
        batch_size, seq_len = preds.shape
        out_label_list = [[] for _ in range(batch_size)]
        preds_list = [[] for _ in range(batch_size)]
        for i in range(batch_size):
            for j in range(seq_len):
                if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index:
                    out_label_list[i].append(label_map[label_ids[i][j]])
                    preds_list[i].append(label_map[preds[i][j]])
        return preds_list, out_label_list
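    # Note: nn.CrossEntropyLoss().ignore_index is -100, the sentinel the dataset
    # assigns to special tokens and non-first subword pieces, so those positions
    # are dropped from both tag lists before seqeval scoring.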
    def compute_metrics(p: EvalPrediction) -> Dict:
        preds_list, out_label_list = align_predictions(p.predictions, p.label_ids)
        return {
            "accuracy_score": accuracy_score(out_label_list, preds_list),
            "precision": precision_score(out_label_list, preds_list),
            "recall": recall_score(out_label_list, preds_list),
            "f1": f1_score(out_label_list, preds_list),
        }
    # Data collator
    data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) if training_args.fp16 else None

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
        data_collator=data_collator,
    )

    # Training
    if training_args.do_train:
        trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None
        )
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_process_zero():
            tokenizer.save_pretrained(training_args.output_dir)
    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")
        if trainer.is_world_process_zero():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in result.items():
                    logger.info("  %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))
            results.update(result)
    # Predict
    if training_args.do_predict:
        test_dataset = TokenClassificationDataset(
            token_classification_task=token_classification_task,
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            labels=labels,
            model_type=config.model_type,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.test,
        )
        predictions, label_ids, metrics = trainer.predict(test_dataset)
        preds_list, _ = align_predictions(predictions, label_ids)
        output_test_results_file = os.path.join(training_args.output_dir, "test_results.txt")
        if trainer.is_world_process_zero():
            with open(output_test_results_file, "w") as writer:
                for key, value in metrics.items():
                    logger.info("  %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))
        # Save predictions
        output_test_predictions_file = os.path.join(training_args.output_dir, "test_predictions.txt")
        if trainer.is_world_process_zero():
            with open(output_test_predictions_file, "w") as writer:
                with open(os.path.join(data_args.data_dir, "test.txt"), "r") as f:
                    token_classification_task.write_predictions_to_file(writer, f, preds_list)

    return results
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
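    # Typical invocation (illustrative only; the script name, paths and flags
    # below are placeholders, not taken from this file):
    #   python run_ner.py --data_dir ./data --labels ./data/labels.txt \
    #     --model_name_or_path bert-base-cased --output_dir ./out --do_train --do_eval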
| 61 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_biogpt': ['BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BioGptConfig'],
'tokenization_biogpt': ['BioGptTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_biogpt"] = [
'BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST',
'BioGptForCausalLM',
'BioGptForTokenClassification',
'BioGptForSequenceClassification',
'BioGptModel',
'BioGptPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
from .tokenization_biogpt import BioGptTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_biogpt import (
BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
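    # With this lazy-module pattern, `from ... import BioGptModel` only triggers
    # the torch-dependent submodule import on first attribute access (standard
    # transformers _LazyModule behavior).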
| 353 | 0 |
"""simple docstring"""
from math import sqrt
def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(nth: int = 10001) -> int:
    count = 0
    number = 1
    while count != nth and number < 3:
        number += 1
        if is_prime(number):
            count += 1
    while count != nth:
        number += 2
        if is_prime(number):
            count += 1
    return number


if __name__ == "__main__":
    print(f"{solution() = }")
| 248 |
"""simple docstring"""
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class TreeNode:
    data: int
    left: TreeNode | None = None
    right: TreeNode | None = None


CoinsDistribResult = namedtuple("CoinsDistribResult", "moves excess")
def distribute_coins(root: TreeNode | None) -> int:
    if root is None:
        return 0

    # Validation
    def count_nodes(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_nodes(node.left) + count_nodes(node.right) + 1

    def count_coins(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_coins(node.left) + count_coins(node.right) + node.data

    if count_nodes(root) != count_coins(root):
        raise ValueError("The nodes number should be same as the number of coins")
    # Main calculation
    def get_distrib(node: TreeNode | None) -> CoinsDistribResult:
        if node is None:
            return CoinsDistribResult(0, 1)

        left_distrib_moves, left_distrib_excess = get_distrib(node.left)
        right_distrib_moves, right_distrib_excess = get_distrib(node.right)

        coins_to_left = 1 - left_distrib_excess
        coins_to_right = 1 - right_distrib_excess

        distrib_moves = (
            left_distrib_moves
            + right_distrib_moves
            + abs(coins_to_left)
            + abs(coins_to_right)
        )
        distrib_excess = node.data - coins_to_left - coins_to_right

        return CoinsDistribResult(distrib_moves, distrib_excess)

    return get_distrib(root)[0]
if __name__ == "__main__":
import doctest
doctest.testmod()
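    # Minimal usage sketch (hand-verified): a three-node tree holding
    # (3, 0, 0) coins needs two moves, one coin from the root to each leaf.
    demo_root = TreeNode(3, TreeNode(0), TreeNode(0))
    assert distribute_coins(demo_root) == 2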
| 248 | 1 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE__ : Tuple = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ : Optional[Any] = {
"""microsoft/unispeech-sat-base-100h-libri-ft""": (
"""https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft/resolve/main/config.json"""
),
# See all UniSpeechSat models at https://huggingface.co/models?filter=unispeech_sat
}
class UniSpeechSatConfig(PretrainedConfig):
model_type = '''unispeech-sat'''
def __init__( self , __lowerCAmelCase=3_2 , __lowerCAmelCase=7_6_8 , __lowerCAmelCase=1_2 , __lowerCAmelCase=1_2 , __lowerCAmelCase=3_0_7_2 , __lowerCAmelCase="gelu" , __lowerCAmelCase=0.1 , __lowerCAmelCase=0.1 , __lowerCAmelCase=0.1 , __lowerCAmelCase=0.0 , __lowerCAmelCase=0.0 , __lowerCAmelCase=0.1 , __lowerCAmelCase=0.1 , __lowerCAmelCase=0.02 , __lowerCAmelCase=1E-5 , __lowerCAmelCase="group" , __lowerCAmelCase="gelu" , __lowerCAmelCase=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2) , __lowerCAmelCase=(5, 2, 2, 2, 2, 2, 2) , __lowerCAmelCase=(1_0, 3, 3, 3, 3, 2, 2) , __lowerCAmelCase=False , __lowerCAmelCase=1_2_8 , __lowerCAmelCase=1_6 , __lowerCAmelCase=False , __lowerCAmelCase=True , __lowerCAmelCase=0.05 , __lowerCAmelCase=1_0 , __lowerCAmelCase=2 , __lowerCAmelCase=0.0 , __lowerCAmelCase=1_0 , __lowerCAmelCase=0 , __lowerCAmelCase=3_2_0 , __lowerCAmelCase=2 , __lowerCAmelCase=0.1 , __lowerCAmelCase=1_0_0 , __lowerCAmelCase=2_5_6 , __lowerCAmelCase=2_5_6 , __lowerCAmelCase=0.1 , __lowerCAmelCase="mean" , __lowerCAmelCase=False , __lowerCAmelCase=False , __lowerCAmelCase=2_5_6 , __lowerCAmelCase=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 1_5_0_0) , __lowerCAmelCase=(5, 3, 3, 1, 1) , __lowerCAmelCase=(1, 2, 3, 1, 1) , __lowerCAmelCase=5_1_2 , __lowerCAmelCase=0 , __lowerCAmelCase=1 , __lowerCAmelCase=2 , __lowerCAmelCase=5_0_4 , **__lowerCAmelCase , ):
"""simple docstring"""
super().__init__(**__lowerCAmelCase , pad_token_id=__lowerCAmelCase , bos_token_id=__lowerCAmelCase , eos_token_id=__lowerCAmelCase )
__magic_name__ :Optional[int] = hidden_size
__magic_name__ :Any = feat_extract_norm
__magic_name__ :int = feat_extract_activation
__magic_name__ :str = list(__lowerCAmelCase )
__magic_name__ :Dict = list(__lowerCAmelCase )
__magic_name__ :Tuple = list(__lowerCAmelCase )
__magic_name__ :Dict = conv_bias
__magic_name__ :Dict = num_conv_pos_embeddings
__magic_name__ :int = num_conv_pos_embedding_groups
__magic_name__ :Any = len(self.conv_dim )
__magic_name__ :Optional[Any] = num_hidden_layers
__magic_name__ :List[Any] = intermediate_size
__magic_name__ :Union[str, Any] = hidden_act
__magic_name__ :List[str] = num_attention_heads
__magic_name__ :Tuple = hidden_dropout
__magic_name__ :Tuple = attention_dropout
__magic_name__ :List[str] = activation_dropout
__magic_name__ :Any = feat_proj_dropout
__magic_name__ :List[str] = final_dropout
__magic_name__ :Tuple = layerdrop
__magic_name__ :List[Any] = layer_norm_eps
__magic_name__ :List[Any] = initializer_range
__magic_name__ :Optional[Any] = vocab_size
__magic_name__ :Tuple = num_clusters
__magic_name__ :str = do_stable_layer_norm
__magic_name__ :Dict = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='''
''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='''
F''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
F''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
__magic_name__ :Dict = apply_spec_augment
__magic_name__ :List[Any] = mask_time_prob
__magic_name__ :Tuple = mask_time_length
__magic_name__ :Any = mask_time_min_masks
__magic_name__ :Optional[Any] = mask_feature_prob
__magic_name__ :int = mask_feature_length
__magic_name__ :Optional[int] = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
__magic_name__ :Any = num_codevectors_per_group
__magic_name__ :Dict = num_codevector_groups
__magic_name__ :List[str] = contrastive_logits_temperature
__magic_name__ :List[Any] = feat_quantizer_dropout
__magic_name__ :List[str] = num_negatives
__magic_name__ :Union[str, Any] = codevector_dim
__magic_name__ :Optional[int] = proj_codevector_dim
__magic_name__ :Optional[Any] = diversity_loss_weight
# ctc loss
__magic_name__ :Tuple = ctc_loss_reduction
__magic_name__ :Tuple = ctc_zero_infinity
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
__magic_name__ :Tuple = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
__magic_name__ :Any = list(__lowerCAmelCase )
__magic_name__ :str = list(__lowerCAmelCase )
__magic_name__ :str = list(__lowerCAmelCase )
__magic_name__ :Union[str, Any] = xvector_output_dim
@property
def inputs_to_logits_ratio( self ):
"""simple docstring"""
return functools.reduce(operator.mul , self.conv_stride , 1 )
| 0 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
_SCREAMING_SNAKE_CASE = {
"""vocab_file""": {
"""junnyu/roformer_chinese_small""": """https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt""",
"""junnyu/roformer_chinese_base""": """https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt""",
"""junnyu/roformer_chinese_char_small""": (
"""https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt"""
),
"""junnyu/roformer_chinese_char_base""": (
"""https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt"""
),
"""junnyu/roformer_small_discriminator""": (
"""https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt"""
),
"""junnyu/roformer_small_generator""": (
"""https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt"""
),
}
}
_SCREAMING_SNAKE_CASE = {
"""junnyu/roformer_chinese_small""": 1_536,
"""junnyu/roformer_chinese_base""": 1_536,
"""junnyu/roformer_chinese_char_small""": 512,
"""junnyu/roformer_chinese_char_base""": 512,
"""junnyu/roformer_small_discriminator""": 128,
"""junnyu/roformer_small_generator""": 128,
}
_SCREAMING_SNAKE_CASE = {
"""junnyu/roformer_chinese_small""": {"""do_lower_case""": True},
"""junnyu/roformer_chinese_base""": {"""do_lower_case""": True},
"""junnyu/roformer_chinese_char_small""": {"""do_lower_case""": True},
"""junnyu/roformer_chinese_char_base""": {"""do_lower_case""": True},
"""junnyu/roformer_small_discriminator""": {"""do_lower_case""": True},
"""junnyu/roformer_small_generator""": {"""do_lower_case""": True},
}
class RoFormerTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RoFormerTokenizer
    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case
    def __getstate__(self):
        state = self.__dict__.copy()
        state["_tokenizer"].pre_tokenizer = BertPreTokenizer()
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        vocab = self.__dict__["_tokenizer"].get_vocab()
        self.__dict__["_tokenizer"].pre_tokenizer = PreTokenizer.custom(JiebaPreTokenizer(vocab))

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def save_pretrained(self, save_directory, legacy_format=None, filename_prefix=None, push_to_hub=False, **kwargs):
        self.backend_tokenizer.pre_tokenizer = BertPreTokenizer()
        return super().save_pretrained(save_directory, legacy_format, filename_prefix, push_to_hub, **kwargs)
| 163 | 0 |
'''simple docstring'''
import numpy as np
def lowercase__ ( __UpperCamelCase : np.array ):
'''simple docstring'''
return 1 / (1 + np.exp(-vector ))
def lowercase__ ( __UpperCamelCase : np.array ):
'''simple docstring'''
return vector * sigmoid(1.702 * vector )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 339 |
'''simple docstring'''
from __future__ import annotations
def lowercase__ ( __UpperCamelCase : int | str ):
'''simple docstring'''
__lowercase = str(__UpperCamelCase )
return n == n[::-1]
def lowercase__ ( __UpperCamelCase : int = 1000000 ):
'''simple docstring'''
__lowercase = 0
for i in range(1 , __UpperCamelCase ):
if is_palindrome(__UpperCamelCase ) and is_palindrome(bin(__UpperCamelCase ).split("""b""" )[1] ):
total += i
return total
if __name__ == "__main__":
print(solution(int(str(input().strip()))))
| 339 | 1 |
from importlib import import_module
from .logging import get_logger
logger = get_logger(__name__)
class _PatchedModuleObj:
    """Set all the modules components as attributes of the _PatchedModuleObj object."""

    def __init__(self, module, attrs=None):
        attrs = attrs or []
        if module is not None:
            for key in module.__dict__:
                if key in attrs or not key.startswith("__"):
                    setattr(self, key, getattr(module, key))
        self._original_module = module._original_module if isinstance(module, _PatchedModuleObj) else module
class patch_submodule:
    _active_patches = []

    def __init__(self, obj, target: str, new, attrs=None):
        self.obj = obj
        self.target = target
        self.new = new
        self.key = target.split(".")[0]
        self.original = {}
        self.attrs = attrs or []
    def __enter__(self):
        *submodules, target_attr = self.target.split(".")

        # Patch modules:
        # it's used to patch attributes of submodules like "os.path.join";
        # in this case we need to patch "os" and "os.path"
        for i in range(len(submodules)):
            try:
                submodule = import_module(".".join(submodules[: i + 1]))
            except ModuleNotFoundError:
                continue
            # We iterate over all the globals in self.obj in case we find "os" or "os.path"
            for attr in self.obj.__dir__():
                obj_attr = getattr(self.obj, attr)
                # We don't check for the name of the global, but rather if its value *is* "os" or "os.path".
                # This allows to patch renamed modules like "from os import path as ospath".
                if obj_attr is submodule or (
                    isinstance(obj_attr, _PatchedModuleObj) and obj_attr._original_module is submodule
                ):
                    self.original[attr] = obj_attr
                    # patch at top level
                    setattr(self.obj, attr, _PatchedModuleObj(obj_attr, attrs=self.attrs))
                    patched = getattr(self.obj, attr)
                    # construct lower levels patches
                    for key in submodules[i + 1 :]:
                        setattr(patched, key, _PatchedModuleObj(getattr(patched, key, None), attrs=self.attrs))
                        patched = getattr(patched, key)
                    # finally set the target attribute
                    setattr(patched, target_attr, self.new)
        # Patch attribute itself:
        # it's used for builtins like "open",
        # and also to patch "os.path.join" we may also need to patch "join"
        # itself if it was imported as "from os.path import join".
        if submodules:  # if it's an attribute of a submodule like "os.path.join"
            try:
                attr_value = getattr(import_module(".".join(submodules)), target_attr)
            except (AttributeError, ModuleNotFoundError):
                return
            # We iterate over all the globals in self.obj in case we find "os.path.join"
            for attr in self.obj.__dir__():
                # We don't check for the name of the global, but rather if its value *is* "os.path.join".
                # This allows to patch renamed attributes like "from os.path import join as pjoin".
                if getattr(self.obj, attr) is attr_value:
                    self.original[attr] = getattr(self.obj, attr)
                    setattr(self.obj, attr, self.new)
        elif target_attr in globals()["__builtins__"]:  # if it's a builtin like "open"
            self.original[target_attr] = globals()["__builtins__"][target_attr]
            setattr(self.obj, target_attr, self.new)
        else:
            raise RuntimeError(f"Tried to patch attribute {target_attr} instead of a submodule.")
    def __exit__(self, *exc_info):
        for attr in list(self.original):
            setattr(self.obj, attr, self.original.pop(attr))

    def start(self):
        """Activate a patch."""
        self.__enter__()
        self._active_patches.append(self)

    def stop(self):
        """Stop an active patch."""
        try:
            self._active_patches.remove(self)
        except ValueError:
            # If the patch hasn't been started this will fail
            return None
        return self.__exit__()
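

# Minimal usage sketch (hypothetical module and function names, shown for
# illustration only):
#
#     import my_module
#
#     def fake_join(*parts):
#         return "/".join(parts)
#
#     with patch_submodule(my_module, "os.path.join", fake_join):
#         ...  # code in my_module now sees the patched join at every level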
| 402 |
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
snake_case__ : Optional[Any] = logging.get_logger(__name__)
class MobileViTFeatureExtractor(MobileViTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use MobileViTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 402 | 1 |
from __future__ import annotations
from collections.abc import Iterator
class Node:
    # Class name assumed: the attribute access below (value, left, right)
    # implies a simple binary-tree node.
    def __init__(self, value):
        self.value = value
        self.left = None
        self.right = None


class Tree:
    # Class name assumed; it wraps a root node and sums node values depth-first.
    def __init__(self, tree):
        self.tree = tree

    def depth_first_search(self, node):
        if node is None:
            return 0
        return node.value + (
            self.depth_first_search(node.left) + self.depth_first_search(node.right)
        )

    def __iter__(self):
        yield self.depth_first_search(self.tree)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
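    # Minimal sketch: the iterator yields the sum of all node values.
    demo_root = Node(1)
    demo_root.left, demo_root.right = Node(2), Node(3)
    assert next(iter(Tree(demo_root))) == 6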
| 702 |
import argparse
import json
from typing import List
from ltp import LTP
from transformers import BertTokenizer
def _is_chinese_char(cp):
    # This defines a "chinese character" as anything in the CJK Unicode block:
    # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
    #
    # Note that the CJK Unicode block is NOT all Japanese and Korean characters,
    # despite its name. The modern Korean Hangul alphabet is a different block,
    # as is Japanese Hiragana and Katakana. Those alphabets are used to write
    # space-separated words, so they are not treated specially and handled
    # like the all of the other languages.
    if (
        (cp >= 0x4E00 and cp <= 0x9FFF)
        or (cp >= 0x3400 and cp <= 0x4DBF)  #
        or (cp >= 0x20000 and cp <= 0x2A6DF)  #
        or (cp >= 0x2A700 and cp <= 0x2B73F)  #
        or (cp >= 0x2B740 and cp <= 0x2B81F)  #
        or (cp >= 0x2B820 and cp <= 0x2CEAF)  #
        or (cp >= 0xF900 and cp <= 0xFAFF)
        or (cp >= 0x2F800 and cp <= 0x2FA1F)  #
    ):  #
        return True
    return False
def is_chinese(word: str):
    # word like '180' or '身高' or '神'
    for char in word:
        cp = ord(char)
        if not _is_chinese_char(cp):
            return 0
    return 1
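# e.g. is_chinese("身高") -> 1, while is_chinese("180") -> 0, since '1' falls
# outside every CJK range checked above (hand-verified).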
def get_chinese_word(tokens: List[str]):
    word_set = set()
    for token in tokens:
        chinese_word = len(token) > 1 and is_chinese(token)
        if chinese_word:
            word_set.add(token)
    word_list = list(word_set)
    return word_list
def add_sub_symbol(bert_tokens: List[str], chinese_word_set: set):
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w) for w in chinese_word_set])

    bert_word = bert_tokens
    start, end = 0, len(bert_word)
    while start < end:
        single_word = True
        if is_chinese(bert_word[start]):
            l = min(end - start, max_word_len)
            for i in range(l, 1, -1):
                whole_word = "".join(bert_word[start : start + i])
                if whole_word in chinese_word_set:
                    for j in range(start + 1, start + i):
                        bert_word[j] = "##" + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word
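# Worked example (hand-traced): with chinese_word_set = {"身高"} and
# bert_tokens = ["身", "高", "170"], the loop rewrites the list to
# ["身", "##高", "170"], marking "高" as the continuation of one whole word.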
def prepare_ref(lines: List[str], ltp_tokenizer: LTP, bert_tokenizer: BertTokenizer):
    ltp_res = []
    for i in range(0, len(lines), 100):
        res = ltp_tokenizer.seg(lines[i : i + 100])[0]
        res = [get_chinese_word(r) for r in res]
        ltp_res.extend(res)
    assert len(ltp_res) == len(lines)

    bert_res = []
    for i in range(0, len(lines), 100):
        res = bert_tokenizer(lines[i : i + 100], add_special_tokens=True, truncation=True, max_length=512)
        bert_res.extend(res["input_ids"])
    assert len(bert_res) == len(lines)

    ref_ids = []
    for input_ids, chinese_word in zip(bert_res, ltp_res):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id)
            input_tokens.append(token)
        input_tokens = add_sub_symbol(input_tokens, chinese_word)
        ref_id = []
        # We only save pos of chinese subwords start with ##, which mean is part of a whole word.
        for i, token in enumerate(input_tokens):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)):
                    ref_id.append(i)
        ref_ids.append(ref_id)

    assert len(ref_ids) == len(bert_res)
    return ref_ids
def main(args):
    # For Chinese (Ro)Bert, the best result is from : RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm)
    # If we want to fine-tune these model, we have to use same tokenizer : LTP (https://github.com/HIT-SCIR/ltp)
    with open(args.file_name, "r", encoding="utf-8") as f:
        data = f.readlines()
    data = [line.strip() for line in data if len(line) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    ltp_tokenizer = LTP(args.ltp)  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert)

    ref_ids = prepare_ref(data, ltp_tokenizer, bert_tokenizer)

    with open(args.save_path, "w", encoding="utf-8") as f:
        data = [json.dumps(ref) + "\n" for ref in ref_ids]
        f.writelines(data)
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser(description='prepare_chinese_ref')
parser.add_argument(
'--file_name',
type=str,
default='./resources/chinese-demo.txt',
help='file need process, same as training data in lm',
)
parser.add_argument(
'--ltp', type=str, default='./resources/ltp', help='resources for LTP tokenizer, usually a path'
)
parser.add_argument('--bert', type=str, default='./resources/robert', help='resources for Bert tokenizer')
parser.add_argument('--save_path', type=str, default='./resources/ref.txt', help='path to save res')
lowerCAmelCase_ = parser.parse_args()
main(args)
| 110 | 0 |
"""simple docstring"""
def is_subset_sum(arr: list, required_sum: int) -> bool:
    # function name assumed; nothing else in this file references it
    arr_len = len(arr)
    subset = [[False] * (required_sum + 1) for _ in range(arr_len + 1)]

    # for each arr value, a sum of zero(0) can be formed by not taking any element
    # hence True/1
    for i in range(arr_len + 1):
        subset[i][0] = True

    # sum is not zero and set is empty then false
    for i in range(1, required_sum + 1):
        subset[0][i] = False

    for i in range(1, arr_len + 1):
        for j in range(1, required_sum + 1):
            if arr[i - 1] > j:
                subset[i][j] = subset[i - 1][j]
            if arr[i - 1] <= j:
                subset[i][j] = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]

    return subset[arr_len][required_sum]
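

# Illustrative checks (hand-verified): 4 + 5 == 9, while no subset of
# [3, 34, 4, 12, 5, 2] sums to 30:
#     is_subset_sum([3, 34, 4, 12, 5, 2], 9)   -> True
#     is_subset_sum([3, 34, 4, 12, 5, 2], 30)  -> False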
if __name__ == "__main__":
import doctest
doctest.testmod() | 555 |
"""simple docstring"""
from ....configuration_utils import PretrainedConfig
from ....utils import logging
logger = logging.get_logger(__name__)

# TODO: upload to AWS
RETRIBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "yjernite/retribert-base-uncased": (
        "https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json"
    ),
}
class RetriBertConfig(PretrainedConfig):
    model_type = "retribert"

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=8, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, share_encoders=True, projection_dim=128, pad_token_id=0, **kwargs):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.share_encoders = share_encoders
        self.projection_dim = projection_dim
| 555 | 1 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class lowerCamelCase__ ( A__ , A__ , unittest.TestCase ):
__lowerCamelCase = CycleDiffusionPipeline
__lowerCamelCase = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
"""negative_prompt""",
"""height""",
"""width""",
"""negative_prompt_embeds""",
}
__lowerCamelCase = PipelineTesterMixin.required_optional_params - {"""latents"""}
__lowerCamelCase = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"""source_prompt"""} )
__lowerCamelCase = IMAGE_TO_IMAGE_IMAGE_PARAMS
__lowerCamelCase = IMAGE_TO_IMAGE_IMAGE_PARAMS
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
torch.manual_seed(0 )
unet = UNet2DConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
scheduler = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , num_train_timesteps=1000 , clip_sample=False , set_alpha_to_one=False , )
torch.manual_seed(0 )
vae = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
torch.manual_seed(0 )
text_encoder_config = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
text_encoder = CLIPTextModel(text_encoder_config )
tokenizer = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
components = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def get_dummy_inputs( self , device , seed=0 ):
'''simple docstring'''
image = floats_tensor((1, 3, 32, 32) , rng=random.Random(seed ) ).to(device )
image = image / 2 + 0.5
if str(device ).startswith("""mps""" ):
generator = torch.manual_seed(seed )
else:
generator = torch.Generator(device=device ).manual_seed(seed )
inputs = {
"""prompt""": """An astronaut riding an elephant""",
"""source_prompt""": """An astronaut riding a horse""",
"""image""": image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""eta""": 0.1,
"""strength""": 0.8,
"""guidance_scale""": 3,
"""source_guidance_scale""": 1,
"""output_type""": """numpy""",
}
return inputs
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
lowerCamelCase__: Optional[Any] = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowerCamelCase__: Any = self.get_dummy_components()
lowerCamelCase__: Union[str, Any] = CycleDiffusionPipeline(**__a )
lowerCamelCase__: List[Any] = pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
lowerCamelCase__: Tuple = self.get_dummy_inputs(__a )
lowerCamelCase__: List[Any] = pipe(**__a )
lowerCamelCase__: List[Any] = output.images
lowerCamelCase__: List[Any] = images[0, -3:, -3:, -1]
assert images.shape == (1, 32, 32, 3)
lowerCamelCase__: str = np.array([0.4_459, 0.4_943, 0.4_544, 0.6_643, 0.5_474, 0.4_327, 0.5_701, 0.5_959, 0.5_179] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@unittest.skipIf(torch_device != """cuda""" , """This test requires a GPU""" )
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
lowerCamelCase__: Any = self.get_dummy_components()
for name, module in components.items():
if hasattr(__a , """half""" ):
lowerCamelCase__: Optional[int] = module.half()
lowerCamelCase__: Any = CycleDiffusionPipeline(**__a )
lowerCamelCase__: str = pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
lowerCamelCase__: Dict = self.get_dummy_inputs(__a )
lowerCamelCase__: Tuple = pipe(**__a )
lowerCamelCase__: int = output.images
lowerCamelCase__: int = images[0, -3:, -3:, -1]
assert images.shape == (1, 32, 32, 3)
lowerCamelCase__: Optional[int] = np.array([0.3_506, 0.4_543, 0.446, 0.4_575, 0.5_195, 0.4_155, 0.5_273, 0.518, 0.4_116] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@skip_mps
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
return super().test_save_load_local()
@unittest.skip("""non-deterministic pipeline""" )
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
return super().test_inference_batch_single_identical()
@skip_mps
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
return super().test_dict_tuple_outputs_equivalent()
@skip_mps
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
return super().test_save_load_optional_components()
@skip_mps
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
return super().test_attention_slicing_forward_pass()
@slow
@require_torch_gpu
class lowerCamelCase__ ( unittest.TestCase ):
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
lowerCamelCase__: Optional[int] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/cycle-diffusion/black_colored_car.png""" )
lowerCamelCase__: Union[str, Any] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy""" )
lowerCamelCase__: int = init_image.resize((512, 512) )
lowerCamelCase__: Optional[Any] = """CompVis/stable-diffusion-v1-4"""
lowerCamelCase__: List[Any] = DDIMScheduler.from_pretrained(__a , subfolder="""scheduler""" )
lowerCamelCase__: List[str] = CycleDiffusionPipeline.from_pretrained(
__a , scheduler=__a , safety_checker=__a , torch_dtype=torch.float16 , revision="""fp16""" )
pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
pipe.enable_attention_slicing()
lowerCamelCase__: Union[str, Any] = """A black colored car"""
lowerCamelCase__: Optional[Any] = """A blue colored car"""
lowerCamelCase__: List[str] = torch.manual_seed(0 )
lowerCamelCase__: str = pipe(
prompt=__a , source_prompt=__a , image=__a , num_inference_steps=100 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=__a , output_type="""np""" , )
lowerCamelCase__: Dict = output.images
# the values aren't exactly equal, but the images look the same visually
assert np.abs(image - expected_image ).max() < 5e-1
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
lowerCamelCase__: Tuple = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/cycle-diffusion/black_colored_car.png""" )
lowerCamelCase__: Dict = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy""" )
lowerCamelCase__: List[str] = init_image.resize((512, 512) )
lowerCamelCase__: List[str] = """CompVis/stable-diffusion-v1-4"""
lowerCamelCase__: Tuple = DDIMScheduler.from_pretrained(__a , subfolder="""scheduler""" )
lowerCamelCase__: Union[str, Any] = CycleDiffusionPipeline.from_pretrained(__a , scheduler=__a , safety_checker=__a )
pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
pipe.enable_attention_slicing()
lowerCamelCase__: Optional[int] = """A black colored car"""
lowerCamelCase__: str = """A blue colored car"""
lowerCamelCase__: Union[str, Any] = torch.manual_seed(0 )
lowerCamelCase__: Dict = pipe(
prompt=__a , source_prompt=__a , image=__a , num_inference_steps=100 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=__a , output_type="""np""" , )
lowerCamelCase__: int = output.images
assert np.abs(image - expected_image ).max() < 2e-2
| 242 |
from collections import OrderedDict
from typing import List, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowercase = logging.get_logger(__name__)
_lowercase = {
'google/efficientnet-b7': 'https://huggingface.co/google/efficientnet-b7/resolve/main/config.json',
}
class lowerCamelCase__ ( A__ ):
__lowerCamelCase = """efficientnet"""
    def __init__( self : List[Any] , num_channels : int = 3 , image_size : int = 600 , width_coefficient : float = 2.0 , depth_coefficient : float = 3.1 , depth_divisor : int = 8 , kernel_sizes : List[int] = [3, 3, 5, 3, 5, 5, 3] , in_channels : List[int] = [32, 16, 24, 40, 80, 112, 192] , out_channels : List[int] = [16, 24, 40, 80, 112, 192, 320] , depthwise_padding : List[int] = [] , strides : List[int] = [1, 2, 2, 2, 1, 2, 1] , num_block_repeats : List[int] = [1, 2, 2, 3, 3, 4, 1] , expand_ratios : List[int] = [1, 6, 6, 6, 6, 6, 6] , squeeze_expansion_ratio : float = 0.25 , hidden_act : str = "swish" , hidden_dim : int = 2560 , pooling_type : str = "mean" , initializer_range : float = 0.02 , batch_norm_eps : float = 0.001 , batch_norm_momentum : float = 0.99 , dropout_rate : float = 0.5 , drop_connect_rate : float = 0.2 , **kwargs , ):
        '''simple docstring'''
        super().__init__(**kwargs )
        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.dropout_rate = dropout_rate
        self.drop_connect_rate = drop_connect_rate
        self.num_hidden_layers = sum(num_block_repeats ) * 4
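        # Illustrative arithmetic (an interpretation, not used elsewhere in the class): with the
        # default num_block_repeats [1, 2, 2, 3, 3, 4, 1], sum(...) = 16 repeated MBConv blocks,
        # and the * 4 factor counts roughly four layers per block, so num_hidden_layers = 64.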
class lowerCamelCase__ ( A__ ):
__lowerCamelCase = version.parse("""1.11""" )
@property
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
return 1e-5
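    # Hedged usage sketch (names outside this snippet are assumed, not defined here): this config
    # would typically drive an export via transformers' ONNX helper, roughly
    #   from transformers.onnx import export
    #   export(preprocessor, model, onnx_config, onnx_config.default_onnx_opset, Path("model.onnx"))
    # with `preprocessor`, `model`, and the output path supplied by the caller.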
| 242 | 1 |
'''simple docstring'''
import unittest
from transformers import BertGenerationConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import BertGenerationDecoder, BertGenerationEncoder
class A__ :
def __init__( self , UpperCamelCase__ , UpperCamelCase__=13 , UpperCamelCase__=7 , UpperCamelCase__=True , UpperCamelCase__=True , UpperCamelCase__=99 , UpperCamelCase__=32 , UpperCamelCase__=5 , UpperCamelCase__=4 , UpperCamelCase__=37 , UpperCamelCase__="gelu" , UpperCamelCase__=0.1 , UpperCamelCase__=0.1 , UpperCamelCase__=50 , UpperCamelCase__=0.02 , UpperCamelCase__=True , UpperCamelCase__=None , ) -> str:
'''simple docstring'''
A_ = parent
A_ = batch_size
A_ = seq_length
A_ = is_training
A_ = use_input_mask
A_ = vocab_size
A_ = hidden_size
A_ = num_hidden_layers
A_ = num_attention_heads
A_ = intermediate_size
A_ = hidden_act
A_ = hidden_dropout_prob
A_ = attention_probs_dropout_prob
A_ = max_position_embeddings
A_ = initializer_range
A_ = use_labels
A_ = scope
def snake_case_ ( self ) -> str:
'''simple docstring'''
A_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A_ = None
if self.use_input_mask:
A_ = random_attention_mask([self.batch_size, self.seq_length] )
if self.use_labels:
A_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A_ = self.get_config()
return config, input_ids, input_mask, token_labels
def snake_case_ ( self ) -> Any:
'''simple docstring'''
return BertGenerationConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , is_decoder=UpperCamelCase__ , initializer_range=self.initializer_range , )
def snake_case_ ( self ) -> Any:
'''simple docstring'''
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
input_mask,
token_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ , ) -> Optional[int]:
'''simple docstring'''
A_ = BertGenerationEncoder(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
A_ = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ )
A_ = model(UpperCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ , ) -> Optional[Any]:
'''simple docstring'''
A_ = True
A_ = BertGenerationEncoder(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
A_ = model(
UpperCamelCase__ , attention_mask=UpperCamelCase__ , encoder_hidden_states=UpperCamelCase__ , encoder_attention_mask=UpperCamelCase__ , )
A_ = model(
UpperCamelCase__ , attention_mask=UpperCamelCase__ , encoder_hidden_states=UpperCamelCase__ , )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ , ) -> Tuple:
'''simple docstring'''
A_ = True
A_ = True
A_ = BertGenerationDecoder(config=UpperCamelCase__ ).to(UpperCamelCase__ ).eval()
# first forward pass
A_ = model(
UpperCamelCase__ , attention_mask=UpperCamelCase__ , encoder_hidden_states=UpperCamelCase__ , encoder_attention_mask=UpperCamelCase__ , use_cache=UpperCamelCase__ , )
A_ = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
A_ = ids_tensor((self.batch_size, 3) , config.vocab_size )
A_ = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
A_ = torch.cat([input_ids, next_tokens] , dim=-1 )
A_ = torch.cat([input_mask, next_mask] , dim=-1 )
A_ = model(
UpperCamelCase__ , attention_mask=UpperCamelCase__ , encoder_hidden_states=UpperCamelCase__ , encoder_attention_mask=UpperCamelCase__ , output_hidden_states=UpperCamelCase__ , )["""hidden_states"""][0]
A_ = model(
UpperCamelCase__ , attention_mask=UpperCamelCase__ , encoder_hidden_states=UpperCamelCase__ , encoder_attention_mask=UpperCamelCase__ , past_key_values=UpperCamelCase__ , output_hidden_states=UpperCamelCase__ , )["""hidden_states"""][0]
# select random slice
A_ = ids_tensor((1,) , output_from_past.shape[-1] ).item()
A_ = output_from_no_past[:, -3:, random_slice_idx].detach()
A_ = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1e-3 ) )
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , *UpperCamelCase__ , ) -> Any:
'''simple docstring'''
A_ = BertGenerationDecoder(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
A_ = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , labels=UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def snake_case_ ( self ) -> Union[str, Any]:
'''simple docstring'''
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        inputs_dict = {"""input_ids""": input_ids, """attention_mask""": input_mask}
        return config, inputs_dict
@require_torch
class A__ ( _snake_case , _snake_case , _snake_case , unittest.TestCase ):
lowercase = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else ()
lowercase = (BertGenerationDecoder,) if is_torch_available() else ()
lowercase = (
{"feature-extraction": BertGenerationEncoder, "text-generation": BertGenerationDecoder}
if is_torch_available()
else {}
)
def snake_case_ ( self ) -> List[str]:
'''simple docstring'''
A_ = BertGenerationEncoderTester(self )
A_ = ConfigTester(self , config_class=UpperCamelCase__ , hidden_size=37 )
def snake_case_ ( self ) -> List[str]:
'''simple docstring'''
self.config_tester.run_common_tests()
def snake_case_ ( self ) -> List[Any]:
'''simple docstring'''
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase__ )
def snake_case_ ( self ) -> str:
'''simple docstring'''
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        config.model_type = """bert"""
        self.model_tester.create_and_check_model(config , input_ids , input_mask , token_labels )
def snake_case_ ( self ) -> List[Any]:
'''simple docstring'''
A_ = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*UpperCamelCase__ )
def snake_case_ ( self ) -> List[Any]:
'''simple docstring'''
A_ = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_decoder_model_past_large_inputs(*UpperCamelCase__ )
def snake_case_ ( self ) -> Dict:
'''simple docstring'''
# This regression test was failing with PyTorch < 1.3
        config, input_ids, input_mask, token_labels, encoder_hidden_states, encoder_attention_mask = (
            self.model_tester.prepare_config_and_inputs_for_decoder()
        )
        encoder_hidden_states = None
        self.model_tester.create_and_check_model_as_decoder(
            config , input_ids , input_mask , token_labels , encoder_hidden_states , encoder_attention_mask , )
def snake_case_ ( self ) -> str:
'''simple docstring'''
A_ = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_for_causal_lm(*UpperCamelCase__ )
@slow
def snake_case_ ( self ) -> Optional[int]:
'''simple docstring'''
        model = BertGenerationEncoder.from_pretrained("""google/bert_for_seq_generation_L-24_bbc_encoder""" )
        self.assertIsNotNone(model )
@require_torch
class A__ ( unittest.TestCase ):
@slow
def snake_case_ ( self ) -> str:
'''simple docstring'''
        model = BertGenerationEncoder.from_pretrained("""google/bert_for_seq_generation_L-24_bbc_encoder""" )
        input_ids = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]] )
        with torch.no_grad():
            output = model(input_ids )[0]
        expected_shape = torch.Size([1, 8, 1024] )
        self.assertEqual(output.shape , expected_shape )
        expected_slice = torch.tensor(
            [[[0.1775, 0.0083, -0.0321], [1.6002, 0.1287, 0.3912], [2.1473, 0.5791, 0.6066]]] )
        self.assertTrue(torch.allclose(output[:, :3, :3] , expected_slice , atol=1e-4 ) )
@require_torch
class A__ ( unittest.TestCase ):
@slow
def snake_case_ ( self ) -> Tuple:
'''simple docstring'''
        model = BertGenerationDecoder.from_pretrained("""google/bert_for_seq_generation_L-24_bbc_encoder""" )
        input_ids = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]] )
        with torch.no_grad():
            output = model(input_ids )[0]
        expected_shape = torch.Size([1, 8, 50358] )
        self.assertEqual(output.shape , expected_shape )
        expected_slice = torch.tensor(
            [[[-0.5788, -2.5994, -3.7054], [0.0438, 4.7997, 1.8795], [1.5862, 6.6409, 4.4638]]] )
        self.assertTrue(torch.allclose(output[:, :3, :3] , expected_slice , atol=1e-4 ) )
| 288 |
'''simple docstring'''
from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class A__ :
def __init__( self , UpperCamelCase__ , UpperCamelCase__=12 , UpperCamelCase__=7 , UpperCamelCase__=True , UpperCamelCase__=True , UpperCamelCase__=True , UpperCamelCase__=99 , UpperCamelCase__=32 , UpperCamelCase__=32 , UpperCamelCase__=2 , UpperCamelCase__=4 , UpperCamelCase__=37 , UpperCamelCase__=0.1 , UpperCamelCase__=0.1 , UpperCamelCase__=512 , UpperCamelCase__=0.02 , UpperCamelCase__=0 , UpperCamelCase__=None , ) -> Union[str, Any]:
'''simple docstring'''
A_ = parent
A_ = batch_size
A_ = seq_length
A_ = is_training
A_ = use_input_mask
A_ = use_labels
A_ = vocab_size
A_ = hidden_size
A_ = projection_dim
A_ = num_hidden_layers
A_ = num_attention_heads
A_ = intermediate_size
A_ = dropout
A_ = attention_dropout
A_ = max_position_embeddings
A_ = initializer_range
A_ = scope
A_ = bos_token_id
def snake_case_ ( self ) -> Dict:
'''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        if input_mask is not None:
            input_mask = input_mask.numpy()
            batch_size, seq_length = input_mask.shape
            rnd_start_indices = np.random.randint(1 , seq_length - 1 , size=(batch_size,) )
            for batch_idx, start_index in enumerate(rnd_start_indices ):
                input_mask[batch_idx, :start_index] = 1
                input_mask[batch_idx, start_index:] = 0
        config = self.get_config()
        return config, input_ids, tf.convert_to_tensor(input_mask )
def snake_case_ ( self ) -> Optional[int]:
'''simple docstring'''
return BlipTextConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , projection_dim=self.projection_dim , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , dropout=self.dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , bos_token_id=self.bos_token_id , )
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> int:
'''simple docstring'''
A_ = TFBlipTextModel(config=UpperCamelCase__ )
A_ = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , training=UpperCamelCase__ )
A_ = model(UpperCamelCase__ , training=UpperCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def snake_case_ ( self ) -> List[Any]:
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_tf
class A__ ( _snake_case , unittest.TestCase ):
lowercase = (TFBlipTextModel,) if is_tf_available() else ()
lowercase = False
lowercase = False
lowercase = False
def snake_case_ ( self ) -> Optional[int]:
'''simple docstring'''
A_ = BlipTextModelTester(self )
A_ = ConfigTester(self , config_class=UpperCamelCase__ , hidden_size=37 )
def snake_case_ ( self ) -> Tuple:
'''simple docstring'''
self.config_tester.run_common_tests()
def snake_case_ ( self ) -> str:
'''simple docstring'''
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase__ )
def snake_case_ ( self ) -> List[Any]:
'''simple docstring'''
pass
def snake_case_ ( self ) -> Union[str, Any]:
'''simple docstring'''
pass
@unittest.skip(reason="""Blip does not use inputs_embeds""" )
def snake_case_ ( self ) -> Union[str, Any]:
'''simple docstring'''
pass
@unittest.skip(reason="""BlipTextModel has no base class and is not available in MODEL_MAPPING""" )
def snake_case_ ( self ) -> List[Any]:
'''simple docstring'''
pass
@unittest.skip(reason="""BlipTextModel has no base class and is not available in MODEL_MAPPING""" )
def snake_case_ ( self ) -> Any:
'''simple docstring'''
pass
@slow
def snake_case_ ( self ) -> List[Any]:
'''simple docstring'''
for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A_ = TFBlipTextModel.from_pretrained(UpperCamelCase__ )
self.assertIsNotNone(UpperCamelCase__ )
def snake_case_ ( self , UpperCamelCase__=True ) -> str:
'''simple docstring'''
super().test_pt_tf_model_equivalence(allow_missing_keys=UpperCamelCase__ )
| 288 | 1 |
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import VideoMAEConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEModel,
)
from transformers.models.videomae.modeling_videomae import VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class __lowerCAmelCase :
def __init__( self , lowerCAmelCase , lowerCAmelCase=13 , lowerCAmelCase=10 , lowerCAmelCase=3 , lowerCAmelCase=2 , lowerCAmelCase=2 , lowerCAmelCase=2 , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=32 , lowerCAmelCase=5 , lowerCAmelCase=4 , lowerCAmelCase=37 , lowerCAmelCase="gelu" , lowerCAmelCase=0.1 , lowerCAmelCase=0.1 , lowerCAmelCase=10 , lowerCAmelCase=0.02 , lowerCAmelCase=0.9 , lowerCAmelCase=None , ) -> Optional[int]:
'''simple docstring'''
_lowercase =parent
_lowercase =batch_size
_lowercase =image_size
_lowercase =num_channels
_lowercase =patch_size
_lowercase =tubelet_size
_lowercase =num_frames
_lowercase =is_training
_lowercase =use_labels
_lowercase =hidden_size
_lowercase =num_hidden_layers
_lowercase =num_attention_heads
_lowercase =intermediate_size
_lowercase =hidden_act
_lowercase =hidden_dropout_prob
_lowercase =attention_probs_dropout_prob
_lowercase =type_sequence_label_size
_lowercase =initializer_range
_lowercase =mask_ratio
_lowercase =scope
# in VideoMAE, the number of tokens equals num_frames/tubelet_size * num_patches per frame
_lowercase =(image_size // patch_size) ** 2
_lowercase =(num_frames // tubelet_size) * self.num_patches_per_frame
# use this variable to define bool_masked_pos
_lowercase =int(mask_ratio * self.seq_length )
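        # Worked example with the defaults above: (10 // 2) ** 2 = 25 patches per frame,
        # (2 // 2) * 25 = 25 tokens per video, and num_masks = int(0.9 * 25) = 22 masked tokens.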
def A__ ( self ) -> Tuple:
'''simple docstring'''
_lowercase =floats_tensor(
[self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] )
_lowercase =None
if self.use_labels:
_lowercase =ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowercase =self.get_config()
return config, pixel_values, labels
def A__ ( self ) -> List[str]:
'''simple docstring'''
return VideoMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , tubelet_size=self.tubelet_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowerCAmelCase , initializer_range=self.initializer_range , )
def A__ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) -> Union[str, Any]:
'''simple docstring'''
_lowercase =VideoMAEModel(config=lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
_lowercase =model(lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def A__ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) -> Union[str, Any]:
'''simple docstring'''
_lowercase =VideoMAEForPreTraining(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
# important: each video needs to have the same number of masked patches
# hence we define a single mask, which we then repeat for each example in the batch
_lowercase =torch.ones((self.num_masks,) )
_lowercase =torch.cat([mask, torch.zeros(self.seq_length - mask.size(0 ) )] )
_lowercase =mask.expand(self.batch_size , -1 ).bool()
_lowercase =model(lowerCAmelCase , lowerCAmelCase )
# model only returns predictions for masked patches
_lowercase =mask.sum().item()
_lowercase =3 * self.tubelet_size * self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_masked_patches, decoder_num_labels) )
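        # With the defaults above, decoder_num_labels = 3 * 2 * 2**2 = 24: every masked patch is
        # reconstructed as raw pixel values over channels * tubelet_size * patch_size**2 entries.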
def A__ ( self ) -> Optional[int]:
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class __lowerCAmelCase ( UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ):
_a = (
(VideoMAEModel, VideoMAEForPreTraining, VideoMAEForVideoClassification) if is_torch_available() else ()
)
_a = (
{"""feature-extraction""": VideoMAEModel, """video-classification""": VideoMAEForVideoClassification}
if is_torch_available()
else {}
)
_a = False
_a = False
_a = False
_a = False
def A__ ( self ) -> Optional[int]:
'''simple docstring'''
_lowercase =VideoMAEModelTester(self )
_lowercase =ConfigTester(self , config_class=lowerCAmelCase , has_text_modality=lowerCAmelCase , hidden_size=37 )
def A__ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase=False ) -> List[str]:
'''simple docstring'''
_lowercase =copy.deepcopy(lowerCAmelCase )
if model_class == VideoMAEForPreTraining:
# important: each video needs to have the same number of masked patches
# hence we define a single mask, which we then repeat for each example in the batch
_lowercase =torch.ones((self.model_tester.num_masks,) )
_lowercase =torch.cat([mask, torch.zeros(self.model_tester.seq_length - mask.size(0 ) )] )
_lowercase =mask.expand(self.model_tester.batch_size , -1 ).bool()
_lowercase =bool_masked_pos.to(lowerCAmelCase )
if return_labels:
if model_class in [
*get_values(lowerCAmelCase ),
]:
_lowercase =torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase )
return inputs_dict
def A__ ( self ) -> Optional[Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='VideoMAE does not use inputs_embeds' )
def A__ ( self ) -> List[str]:
'''simple docstring'''
pass
def A__ ( self ) -> List[Any]:
'''simple docstring'''
_lowercase =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowercase =model_class(lowerCAmelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
_lowercase =model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCAmelCase , nn.Linear ) )
def A__ ( self ) -> Union[str, Any]:
'''simple docstring'''
_lowercase =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowercase =model_class(lowerCAmelCase )
_lowercase =inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowercase =[*signature.parameters.keys()]
_lowercase =['''pixel_values''']
self.assertListEqual(arg_names[:1] , lowerCAmelCase )
def A__ ( self ) -> Union[str, Any]:
'''simple docstring'''
_lowercase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase )
def A__ ( self ) -> str:
'''simple docstring'''
_lowercase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*lowerCAmelCase )
@slow
def A__ ( self ) -> Optional[Any]:
'''simple docstring'''
for model_name in VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowercase =VideoMAEModel.from_pretrained(lowerCAmelCase )
self.assertIsNotNone(lowerCAmelCase )
def A__ ( self ) -> int:
'''simple docstring'''
if not self.has_attentions:
pass
else:
_lowercase =self.model_tester.prepare_config_and_inputs_for_common()
_lowercase =True
for model_class in self.all_model_classes:
_lowercase =self.model_tester.seq_length - self.model_tester.num_masks
_lowercase =(
num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
)
_lowercase =True
_lowercase =False
_lowercase =True
_lowercase =model_class(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
with torch.no_grad():
_lowercase =model(**self._prepare_for_class(lowerCAmelCase , lowerCAmelCase ) )
_lowercase =outputs.attentions
self.assertEqual(len(lowerCAmelCase ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
_lowercase =True
_lowercase =model_class(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
with torch.no_grad():
_lowercase =model(**self._prepare_for_class(lowerCAmelCase , lowerCAmelCase ) )
_lowercase =outputs.attentions
self.assertEqual(len(lowerCAmelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
_lowercase =len(lowerCAmelCase )
# Check attention is always last and order is fine
_lowercase =True
_lowercase =True
_lowercase =model_class(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
with torch.no_grad():
_lowercase =model(**self._prepare_for_class(lowerCAmelCase , lowerCAmelCase ) )
self.assertEqual(out_len + 1 , len(lowerCAmelCase ) )
_lowercase =outputs.attentions
self.assertEqual(len(lowerCAmelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
def A__ ( self ) -> Tuple:
'''simple docstring'''
def check_hidden_states_output(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
_lowercase =model_class(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
with torch.no_grad():
_lowercase =model(**self._prepare_for_class(lowerCAmelCase , lowerCAmelCase ) )
_lowercase =outputs.hidden_states
_lowercase =self.model_tester.num_hidden_layers + 1
self.assertEqual(len(lowerCAmelCase ) , lowerCAmelCase )
_lowercase =self.model_tester.seq_length - self.model_tester.num_masks
_lowercase =num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
_lowercase =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowercase =True
check_hidden_states_output(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_lowercase =True
check_hidden_states_output(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def A__ ( self ) -> str:
'''simple docstring'''
pass
def prepare_video( ) -> Tuple:
    """simple docstring"""
    file = hf_hub_download(
        repo_id='hf-internal-testing/spaghetti-video' , filename='eating_spaghetti.npy' , repo_type='dataset' )
    video = np.load(file )
    return list(video )
@require_torch
@require_vision
class __lowerCAmelCase ( unittest.TestCase ):
@cached_property
def A__ ( self ) -> List[str]:
'''simple docstring'''
return (
VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
if is_vision_available()
else None
)
@slow
def A__ ( self ) -> Optional[Any]:
'''simple docstring'''
_lowercase =VideoMAEForVideoClassification.from_pretrained('MCG-NJU/videomae-base-finetuned-kinetics' ).to(
lowerCAmelCase )
_lowercase =self.default_image_processor
_lowercase =prepare_video()
_lowercase =image_processor(lowerCAmelCase , return_tensors='pt' ).to(lowerCAmelCase )
# forward pass
with torch.no_grad():
_lowercase =model(**lowerCAmelCase )
# verify the logits
_lowercase =torch.Size((1, 400) )
self.assertEqual(outputs.logits.shape , lowerCAmelCase )
_lowercase =torch.tensor([0.3669, -0.0688, -0.2421] ).to(lowerCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCAmelCase , atol=1e-4 ) )
@slow
def A__ ( self ) -> List[str]:
'''simple docstring'''
_lowercase =VideoMAEForPreTraining.from_pretrained('MCG-NJU/videomae-base-short' ).to(lowerCAmelCase )
_lowercase =self.default_image_processor
_lowercase =prepare_video()
_lowercase =image_processor(lowerCAmelCase , return_tensors='pt' ).to(lowerCAmelCase )
# add boolean mask, indicating which patches to mask
_lowercase =hf_hub_download(repo_id='hf-internal-testing/bool-masked-pos' , filename='bool_masked_pos.pt' )
_lowercase =torch.load(lowerCAmelCase )
# forward pass
with torch.no_grad():
_lowercase =model(**lowerCAmelCase )
# verify the logits
_lowercase =torch.Size([1, 1_408, 1_536] )
_lowercase =torch.tensor(
[[0.7994, 0.9612, 0.8508], [0.7401, 0.8958, 0.8302], [0.5862, 0.7468, 0.7325]] , device=lowerCAmelCase )
self.assertEqual(outputs.logits.shape , lowerCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , lowerCAmelCase , atol=1e-4 ) )
# verify the loss (`config.norm_pix_loss` = `True`)
_lowercase =torch.tensor([0.5142] , device=lowerCAmelCase )
self.assertTrue(torch.allclose(outputs.loss , lowerCAmelCase , atol=1e-4 ) )
# verify the loss (`config.norm_pix_loss` = `False`)
_lowercase =VideoMAEForPreTraining.from_pretrained('MCG-NJU/videomae-base-short' , norm_pix_loss=lowerCAmelCase ).to(
lowerCAmelCase )
with torch.no_grad():
_lowercase =model(**lowerCAmelCase )
        expected_loss = torch.tensor([0.6469] , device=lowerCAmelCase )
        self.assertTrue(torch.allclose(outputs.loss , expected_loss , atol=1e-4 ) )
| 719 |
from __future__ import annotations
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import is_tf_available, is_vision_available
from ...test_modeling_tf_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_tf_bert import TFBertModelTester
from ..clip.test_modeling_tf_clip import TFCLIPVisionModelTester
from ..deit.test_modeling_tf_deit import TFDeiTModelTester
from ..roberta.test_modeling_tf_roberta import TFRobertaModelTester
from ..vit.test_modeling_tf_vit import TFViTModelTester
if is_tf_available():
from transformers import (
TFBertModel,
TFCLIPVisionModel,
TFDeiTModel,
TFRobertaModel,
TFVisionTextDualEncoderModel,
TFViTModel,
VisionTextDualEncoderConfig,
)
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor
def to_atuple( x : List[Any] ) -> Union[str, Any]:
    """simple docstring"""
    if isinstance(x , collections.abc.Iterable ):
        return x
    return (x, x)
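# e.g. to_atuple(7) returns (7, 7) while to_atuple((224, 224)) comes back unchanged; the
# attention-shape checks below use it to normalize scalar image and patch sizes into 2-tuples.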
@require_tf
class __lowerCAmelCase :
def A__ ( self , lowerCAmelCase , lowerCAmelCase ) -> List[Any]:
'''simple docstring'''
pass
def A__ ( self ) -> str:
'''simple docstring'''
pass
def A__ ( self ) -> Optional[Any]:
'''simple docstring'''
pass
def A__ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase=None , **lowerCAmelCase ) -> Optional[Any]:
'''simple docstring'''
_lowercase =VisionTextDualEncoderConfig.from_vision_text_configs(lowerCAmelCase , lowerCAmelCase )
_lowercase =TFVisionTextDualEncoderModel(lowerCAmelCase )
_lowercase =model(input_ids=lowerCAmelCase , pixel_values=lowerCAmelCase , attention_mask=lowerCAmelCase )
self.assertEqual(output['text_embeds'].shape , (input_ids.shape[0], config.projection_dim) )
self.assertEqual(output['image_embeds'].shape , (pixel_values.shape[0], config.projection_dim) )
def A__ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase=None , **lowerCAmelCase ) -> int:
'''simple docstring'''
_lowercase , _lowercase =self.get_vision_text_model(lowerCAmelCase , lowerCAmelCase )
_lowercase =TFVisionTextDualEncoderModel(vision_model=lowerCAmelCase , text_model=lowerCAmelCase )
_lowercase =model(input_ids=lowerCAmelCase , pixel_values=lowerCAmelCase , attention_mask=lowerCAmelCase )
self.assertEqual(output['text_embeds'].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output['image_embeds'].shape , (pixel_values.shape[0], model.config.projection_dim) )
def A__ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase=None , **lowerCAmelCase ) -> Union[str, Any]:
'''simple docstring'''
_lowercase , _lowercase =self.get_vision_text_model(lowerCAmelCase , lowerCAmelCase )
_lowercase ={'vision_model': vision_model, 'text_model': text_model}
_lowercase =TFVisionTextDualEncoderModel.from_vision_text_pretrained(**lowerCAmelCase )
_lowercase =model(input_ids=lowerCAmelCase , pixel_values=lowerCAmelCase , attention_mask=lowerCAmelCase )
self.assertEqual(output['text_embeds'].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output['image_embeds'].shape , (pixel_values.shape[0], model.config.projection_dim) )
def A__ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase=None , **lowerCAmelCase ) -> int:
'''simple docstring'''
_lowercase , _lowercase =self.get_vision_text_model(lowerCAmelCase , lowerCAmelCase )
_lowercase =TFVisionTextDualEncoderModel(vision_model=lowerCAmelCase , text_model=lowerCAmelCase )
_lowercase =model(input_ids=lowerCAmelCase , pixel_values=lowerCAmelCase , attention_mask=lowerCAmelCase )
_lowercase =output[0].numpy()
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(lowerCAmelCase )
_lowercase =TFVisionTextDualEncoderModel.from_pretrained(lowerCAmelCase )
_lowercase =model(input_ids=lowerCAmelCase , pixel_values=lowerCAmelCase , attention_mask=lowerCAmelCase )
_lowercase =after_output[0].numpy()
_lowercase =np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(lowerCAmelCase , 1e-5 )
def A__ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase=None , **lowerCAmelCase ) -> Optional[int]:
'''simple docstring'''
_lowercase , _lowercase =self.get_vision_text_model(lowerCAmelCase , lowerCAmelCase )
_lowercase =TFVisionTextDualEncoderModel(vision_model=lowerCAmelCase , text_model=lowerCAmelCase )
_lowercase =model(
input_ids=lowerCAmelCase , pixel_values=lowerCAmelCase , attention_mask=lowerCAmelCase , output_attentions=lowerCAmelCase )
_lowercase =output.vision_model_output.attentions
self.assertEqual(len(lowerCAmelCase ) , vision_config.num_hidden_layers )
# in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
_lowercase =to_atuple(vision_model.config.image_size )
_lowercase =to_atuple(vision_model.config.patch_size )
_lowercase =(image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
_lowercase =num_patches + 1
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
_lowercase =output.text_model_output.attentions
self.assertEqual(len(lowerCAmelCase ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
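        # Illustrative numbers (assuming the usual tester defaults of image_size=30, patch_size=2):
        # (30 // 2) * (30 // 2) = 225 patches, plus the [CLS] token, gives seq_len = 226.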
def A__ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) -> Optional[Any]:
'''simple docstring'''
_lowercase =np.abs((a - b) ).max()
self.assertLessEqual(lowerCAmelCase , lowerCAmelCase , F'''Difference between torch and flax is {diff} (>= {tol}).''' )
def A__ ( self ) -> str:
'''simple docstring'''
_lowercase =self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_model(**lowerCAmelCase )
def A__ ( self ) -> int:
'''simple docstring'''
_lowercase =self.prepare_config_and_inputs()
self.check_model_from_pretrained_configs(**lowerCAmelCase )
def A__ ( self ) -> Union[str, Any]:
'''simple docstring'''
_lowercase =self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_from_pretrained(**lowerCAmelCase )
def A__ ( self ) -> List[Any]:
'''simple docstring'''
_lowercase =self.prepare_config_and_inputs()
self.check_save_load(**lowerCAmelCase )
def A__ ( self ) -> Tuple:
'''simple docstring'''
_lowercase =self.prepare_config_and_inputs()
self.check_vision_text_output_attention(**lowerCAmelCase )
@slow
def A__ ( self ) -> Union[str, Any]:
'''simple docstring'''
_lowercase , _lowercase =self.get_pretrained_model_and_inputs()
_lowercase =model_a(**lowerCAmelCase )
_lowercase =outputs[0].numpy()
with tempfile.TemporaryDirectory() as tmp_dirname:
model_a.save_pretrained(lowerCAmelCase )
_lowercase =TFVisionTextDualEncoderModel.from_pretrained(lowerCAmelCase )
_lowercase =model_a(**lowerCAmelCase )
_lowercase =after_outputs[0].numpy()
_lowercase =np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(lowerCAmelCase , 1e-5 )
@require_tf
class __lowerCAmelCase ( SCREAMING_SNAKE_CASE , unittest.TestCase ):
def A__ ( self ) -> List[Any]:
'''simple docstring'''
_lowercase =TFVisionTextDualEncoderModel.from_vision_text_pretrained(
'hf-internal-testing/tiny-random-vit' , 'hf-internal-testing/tiny-random-bert' )
_lowercase =13
_lowercase =floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
] )
_lowercase =ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
_lowercase =random_attention_mask([batch_size, 4] )
_lowercase ={'pixel_values': pixel_values, 'input_ids': input_ids, 'attention_mask': attention_mask}
return model, inputs
def A__ ( self , lowerCAmelCase , lowerCAmelCase ) -> List[Any]:
'''simple docstring'''
_lowercase =TFViTModel(lowerCAmelCase , name='vision_model' )
_lowercase =TFBertModel(lowerCAmelCase , name='text_model' )
return vision_model, text_model
def A__ ( self ) -> Any:
'''simple docstring'''
        vit_model_tester = TFViTModelTester(self )
        bert_model_tester = TFBertModelTester(self )
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
        vision_config, pixel_values, _ = vision_config_and_inputs
        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_tf
class __lowerCAmelCase ( SCREAMING_SNAKE_CASE , unittest.TestCase ):
def A__ ( self ) -> str:
'''simple docstring'''
_lowercase =TFVisionTextDualEncoderModel.from_vision_text_pretrained(
'Rocketknight1/tiny-random-deit-tf' , 'hf-internal-testing/tiny-random-roberta' )
_lowercase =13
_lowercase =floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
] )
_lowercase =ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
_lowercase =random_attention_mask([batch_size, 4] )
_lowercase ={'pixel_values': pixel_values, 'input_ids': input_ids, 'attention_mask': attention_mask}
return model, inputs
def A__ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase=None , **lowerCAmelCase ) -> List[str]:
'''simple docstring'''
_lowercase , _lowercase =self.get_vision_text_model(lowerCAmelCase , lowerCAmelCase )
_lowercase =TFVisionTextDualEncoderModel(vision_model=lowerCAmelCase , text_model=lowerCAmelCase )
_lowercase =model(
input_ids=lowerCAmelCase , pixel_values=lowerCAmelCase , attention_mask=lowerCAmelCase , output_attentions=lowerCAmelCase )
_lowercase =output.vision_model_output.attentions
self.assertEqual(len(lowerCAmelCase ) , vision_config.num_hidden_layers )
# in DEiT, the seq_len equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
_lowercase =to_atuple(vision_model.config.image_size )
_lowercase =to_atuple(vision_model.config.patch_size )
_lowercase =(image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
_lowercase =num_patches + 2
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
_lowercase =output.text_model_output.attentions
self.assertEqual(len(lowerCAmelCase ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
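        # Same arithmetic as the ViT variant above, except DeiT prepends two tokens ([CLS] plus
        # the distillation token), hence seq_len = num_patches + 2.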
def A__ ( self , lowerCAmelCase , lowerCAmelCase ) -> Tuple:
'''simple docstring'''
_lowercase =TFDeiTModel(lowerCAmelCase , name='vision_model' )
_lowercase =TFRobertaModel(lowerCAmelCase , name='text_model' )
return vision_model, text_model
def A__ ( self ) -> Any:
'''simple docstring'''
        vit_model_tester = TFDeiTModelTester(self )
        bert_model_tester = TFRobertaModelTester(self )
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
        vision_config, pixel_values, _ = vision_config_and_inputs
        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_tf
class __lowerCAmelCase ( SCREAMING_SNAKE_CASE , unittest.TestCase ):
def A__ ( self ) -> str:
'''simple docstring'''
_lowercase =TFVisionTextDualEncoderModel.from_vision_text_pretrained(
'Rocketknight1/tiny-random-clip-tf' , 'hf-internal-testing/tiny-random-bert' )
_lowercase =13
_lowercase =floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
] )
_lowercase =ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
_lowercase =random_attention_mask([batch_size, 4] )
_lowercase ={'pixel_values': pixel_values, 'input_ids': input_ids, 'attention_mask': attention_mask}
return model, inputs
def A__ ( self , lowerCAmelCase , lowerCAmelCase ) -> Tuple:
'''simple docstring'''
_lowercase =TFCLIPVisionModel(lowerCAmelCase , name='vision_model' )
_lowercase =TFBertModel(lowerCAmelCase , name='text_model' )
return vision_model, text_model
def A__ ( self ) -> str:
'''simple docstring'''
        clip_model_tester = TFCLIPVisionModelTester(self )
        bert_model_tester = TFBertModelTester(self )
        vision_config_and_inputs = clip_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
        vision_config, pixel_values = vision_config_and_inputs
        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_vision
@require_tf
class __lowerCAmelCase ( unittest.TestCase ):
@slow
def A__ ( self ) -> Optional[int]:
'''simple docstring'''
_lowercase =TFVisionTextDualEncoderModel.from_pretrained(
'clip-italian/clip-italian' , logit_scale_init_value=1.0 , from_pt=lowerCAmelCase )
_lowercase =VisionTextDualEncoderProcessor.from_pretrained('clip-italian/clip-italian' )
_lowercase =Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
_lowercase =processor(
text=['una foto di un gatto', 'una foto di un cane'] , images=lowerCAmelCase , padding=lowerCAmelCase , return_tensors='np' )
_lowercase =model(**lowerCAmelCase )
# verify the logits
self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) )
self.assertEqual(
outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , )
_lowercase =np.array([[1.2284727, 0.3104122]] )
self.assertTrue(np.allclose(outputs.logits_per_image.numpy() , lowerCAmelCase , atol=1e-3 ) )
| 380 | 0 |
'''simple docstring'''
import argparse
import json
import logging
import os
import sys
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, get_gpu_count, slow
_A : Optional[int] = [
os.path.join(os.path.dirname(__file__), dirname)
for dirname in [
'''text-classification''',
'''language-modeling''',
'''summarization''',
'''token-classification''',
'''question-answering''',
]
]
sys.path.extend(SRC_DIRS)
if SRC_DIRS is not None:
import run_clm_flax
import run_flax_glue
import run_flax_ner
import run_mlm_flax
import run_qa
import run_summarization_flax
import run_ta_mlm_flax
logging.basicConfig(level=logging.DEBUG)
_A : Optional[Any] = logging.getLogger()
def UpperCamelCase_ ( ) -> List[Any]:
'''simple docstring'''
    parser = argparse.ArgumentParser()
    parser.add_argument("""-f""" )
    args = parser.parse_args()
return args.f
def UpperCamelCase_ ( output_dir , split : str="eval" ):
    '''simple docstring'''
    path = os.path.join(output_dir , f"""{split}_results.json""" )
    if os.path.exists(path ):
        with open(path , """r""" ) as f:
            return json.load(f )
    raise ValueError(f"""can't find {path}""" )
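# e.g. UpperCamelCase_("/tmp/run") reads "/tmp/run/eval_results.json", while
# UpperCamelCase_("/tmp/run", split="test") reads "/tmp/run/test_results.json".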
_A : Union[str, Any] = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class _lowercase ( UpperCAmelCase__ ):
'''simple docstring'''
def a ( self : List[Any] ) -> List[Any]:
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
run_glue.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--eval_steps=2
--warmup_steps=2
--seed=42
--max_seq_length=128
""".split()
        with patch.object(sys , """argv""" , testargs ):
            run_flax_glue.main()
            result = get_results(tmp_dir )
            self.assertGreaterEqual(result["""eval_accuracy"""] , 0.75 )
@slow
def a ( self : Optional[Any] ) -> Dict:
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
run_clm_flax.py
--model_name_or_path distilgpt2
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--block_size 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
""".split()
        with patch.object(sys , """argv""" , testargs ):
            run_clm_flax.main()
            result = get_results(tmp_dir )
            self.assertLess(result["""eval_perplexity"""] , 100 )
@slow
def a ( self : Optional[Any] ) -> Optional[int]:
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
run_summarization.py
--model_name_or_path t5-small
--train_file tests/fixtures/tests_samples/xsum/sample.json
--validation_file tests/fixtures/tests_samples/xsum/sample.json
--test_file tests/fixtures/tests_samples/xsum/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=8
--do_train
--do_eval
--do_predict
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--predict_with_generate
""".split()
        with patch.object(sys , """argv""" , testargs ):
            run_summarization_flax.main()
            result = get_results(tmp_dir , split="""test""" )
            self.assertGreaterEqual(result["""test_rouge1"""] , 10 )
            self.assertGreaterEqual(result["""test_rouge2"""] , 2 )
            self.assertGreaterEqual(result["""test_rougeL"""] , 7 )
            self.assertGreaterEqual(result["""test_rougeLsum"""] , 7 )
@slow
def a ( self : List[Any] ) -> Union[str, Any]:
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
run_mlm.py
--model_name_or_path distilroberta-base
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--output_dir {tmp_dir}
--overwrite_output_dir
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--logging_steps 2 --eval_steps 2
--do_train
--do_eval
--num_train_epochs=1
""".split()
        with patch.object(sys , """argv""" , testargs ):
            run_mlm_flax.main()
            result = get_results(tmp_dir )
            self.assertLess(result["""eval_perplexity"""] , 42 )
@slow
def a ( self : Any ) -> Tuple:
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
run_t5_mlm_flax.py
--model_name_or_path t5-small
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
""".split()
        with patch.object(sys , """argv""" , testargs ):
            run_ta_mlm_flax.main()
            result = get_results(tmp_dir )
            self.assertGreaterEqual(result["""eval_accuracy"""] , 0.42 )
@slow
def a ( self : Optional[int] ) -> List[str]:
# with so little data distributed training needs more epochs to get the score on par with 0/1 gpu
        epochs = 7 if get_gpu_count() > 1 else 2
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
run_flax_ner.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/conll/sample.json
--validation_file tests/fixtures/tests_samples/conll/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--do_train
--do_eval
--warmup_steps=2
--learning_rate=2e-4
--logging_steps 2 --eval_steps 2
--per_device_train_batch_size=2
--per_device_eval_batch_size=2
--num_train_epochs={epochs}
--seed 7
""".split()
        with patch.object(sys , """argv""" , testargs ):
            run_flax_ner.main()
            result = get_results(tmp_dir )
            self.assertGreaterEqual(result["""eval_accuracy"""] , 0.75 )
            self.assertGreaterEqual(result["""eval_f1"""] , 0.3 )
@slow
def a ( self : Any ) -> Optional[Any]:
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
run_qa.py
--model_name_or_path bert-base-uncased
--version_2_with_negative
--train_file tests/fixtures/tests_samples/SQUAD/sample.json
--validation_file tests/fixtures/tests_samples/SQUAD/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=2
--do_train
--do_eval
--logging_steps 2 --eval_steps 2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
""".split()
        with patch.object(sys , """argv""" , testargs ):
            run_qa.main()
            result = get_results(tmp_dir )
            self.assertGreaterEqual(result["""eval_f1"""] , 30 )
            self.assertGreaterEqual(result["""eval_exact"""] , 30 )
 | 427 |
'''simple docstring'''
import math
def UpperCamelCase_ ( initial_intensity : float , angle : float ) -> float:
    '''simple docstring'''
    if initial_intensity < 0:
        raise ValueError("""The value of intensity cannot be negative""" )
        # handling of negative values of initial intensity
    if angle < 0 or angle > 360:
        raise ValueError("""In Malus Law, the angle is in the range 0-360 degrees""" )
        # handling of values out of allowed range
    return initial_intensity * (math.cos(math.radians(angle ) ) ** 2)
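# Worked example: UpperCamelCase_(100.0, 60.0) = 100 * cos(60 deg) ** 2 = 100 * 0.25 = 25.0,
# and UpperCamelCase_(100.0, 0.0) = 100.0 because the polarizer and analyzer are aligned.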
if __name__ == "__main__":
import doctest
doctest.testmod(name='''malus_law''')
| 427 | 1 |
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def UpperCAmelCase__ ( tmpdir ):
    locka = FileLock(str(tmpdir / "foo.lock" ) )
    lockb = FileLock(str(tmpdir / "foo.lock" ) )
    timeout = 0.01
    with locka.acquire():
        with pytest.raises(Timeout ):
            _start = time.time()
            lockb.acquire(timeout )
        assert time.time() - _start > timeout
def UpperCAmelCase__ ( tmpdir ):
    filename = "a" * 1000 + ".lock"
    locka = FileLock(str(tmpdir / filename ) )
    assert locka._lock_file.endswith(".lock" )
    assert not locka._lock_file.endswith(filename )
    assert len(os.path.basename(locka._lock_file ) ) <= 255
    lockb = FileLock(tmpdir / filename )
    with locka.acquire():
        with pytest.raises(Timeout ):
            lockb.acquire(0 )
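# Both tests exercise the same invariant: acquiring a FileLock whose path another lock object
# already holds must raise Timeout rather than block forever; e.g. lockb.acquire(0 ) above
# fails immediately while locka still holds the file.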
| 719 |
import os
import shutil
from pathlib import Path
from typing import Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging
if is_onnx_available():
import onnxruntime as ort
_UpperCAmelCase : Dict = logging.get_logger(__name__)
_UpperCAmelCase : List[Any] = {
"tensor(bool)": np.bool_,
"tensor(int8)": np.inta,
"tensor(uint8)": np.uinta,
"tensor(int16)": np.intaa,
"tensor(uint16)": np.uintaa,
"tensor(int32)": np.intaa,
"tensor(uint32)": np.uintaa,
"tensor(int64)": np.intaa,
"tensor(uint64)": np.uintaa,
"tensor(float16)": np.floataa,
"tensor(float)": np.floataa,
"tensor(double)": np.floataa,
}
class __lowerCAmelCase :
def __init__( self: Dict , _lowerCAmelCase: Union[str, Any]=None , **_lowerCAmelCase: Any ):
logger.info("`diffusers.OnnxRuntimeModel` is experimental and might change in the future." )
lowercase :str = model
lowercase :Any = kwargs.get("model_save_dir" , _lowerCAmelCase )
lowercase :Dict = kwargs.get("latest_model_name" , _lowerCAmelCase )
    def __call__( self: Optional[Any] , **kwargs ):
        inputs = {k: np.array(v ) for k, v in kwargs.items()}
        return self.model.run(None , inputs )
@staticmethod
def SCREAMING_SNAKE_CASE ( _lowerCAmelCase: Union[str, Path] , _lowerCAmelCase: List[Any]=None , _lowerCAmelCase: List[Any]=None ):
if provider is None:
logger.info("No onnxruntime provider specified, using CPUExecutionProvider" )
lowercase :int = "CPUExecutionProvider"
return ort.InferenceSession(_lowerCAmelCase , providers=[provider] , sess_options=_lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self: List[Any] , _lowerCAmelCase: Union[str, Path] , _lowerCAmelCase: Optional[str] = None , **_lowerCAmelCase: Any ):
lowercase :Optional[int] = file_name if file_name is not None else ONNX_WEIGHTS_NAME
lowercase :Tuple = self.model_save_dir.joinpath(self.latest_model_name )
lowercase :Optional[int] = Path(_lowerCAmelCase ).joinpath(_lowerCAmelCase )
try:
shutil.copyfile(_lowerCAmelCase , _lowerCAmelCase )
except shutil.SameFileError:
pass
# copy external weights (for models >2GB)
lowercase :Tuple = self.model_save_dir.joinpath(_lowerCAmelCase )
if src_path.exists():
lowercase :Dict = Path(_lowerCAmelCase ).joinpath(_lowerCAmelCase )
try:
shutil.copyfile(_lowerCAmelCase , _lowerCAmelCase )
except shutil.SameFileError:
pass
def SCREAMING_SNAKE_CASE ( self: Optional[Any] , _lowerCAmelCase: Union[str, os.PathLike] , **_lowerCAmelCase: int , ):
if os.path.isfile(_lowerCAmelCase ):
logger.error(F"Provided path ({save_directory}) should be a directory, not a file" )
return
os.makedirs(_lowerCAmelCase , exist_ok=_lowerCAmelCase )
# saving model weights/files
self._save_pretrained(_lowerCAmelCase , **_lowerCAmelCase )
@classmethod
def SCREAMING_SNAKE_CASE ( cls: Dict , _lowerCAmelCase: Union[str, Path] , _lowerCAmelCase: Optional[Union[bool, str, None]] = None , _lowerCAmelCase: Optional[Union[str, None]] = None , _lowerCAmelCase: bool = False , _lowerCAmelCase: Optional[str] = None , _lowerCAmelCase: Optional[str] = None , _lowerCAmelCase: Optional[str] = None , _lowerCAmelCase: Optional["ort.SessionOptions"] = None , **_lowerCAmelCase: Optional[int] , ):
lowercase :List[str] = file_name if file_name is not None else ONNX_WEIGHTS_NAME
# load model from local directory
if os.path.isdir(_lowerCAmelCase ):
lowercase :Union[str, Any] = OnnxRuntimeModel.load_model(
os.path.join(_lowerCAmelCase , _lowerCAmelCase ) , provider=_lowerCAmelCase , sess_options=_lowerCAmelCase )
lowercase :Optional[Any] = Path(_lowerCAmelCase )
# load model from hub
else:
# download model
lowercase :str = hf_hub_download(
repo_id=_lowerCAmelCase , filename=_lowerCAmelCase , use_auth_token=_lowerCAmelCase , revision=_lowerCAmelCase , cache_dir=_lowerCAmelCase , force_download=_lowerCAmelCase , )
lowercase :Optional[int] = Path(_lowerCAmelCase ).parent
lowercase :Tuple = Path(_lowerCAmelCase ).name
lowercase :Tuple = OnnxRuntimeModel.load_model(_lowerCAmelCase , provider=_lowerCAmelCase , sess_options=_lowerCAmelCase )
return cls(model=_lowerCAmelCase , **_lowerCAmelCase )
@classmethod
def SCREAMING_SNAKE_CASE ( cls: str , _lowerCAmelCase: Union[str, Path] , _lowerCAmelCase: bool = True , _lowerCAmelCase: Optional[str] = None , _lowerCAmelCase: Optional[str] = None , **_lowerCAmelCase: Any , ):
lowercase :List[str] = None
if len(str(_lowerCAmelCase ).split("@" ) ) == 2:
lowercase , lowercase :Tuple = model_id.split("@" )
return cls._from_pretrained(
model_id=_lowerCAmelCase , revision=_lowerCAmelCase , cache_dir=_lowerCAmelCase , force_download=_lowerCAmelCase , use_auth_token=_lowerCAmelCase , **_lowerCAmelCase , )
| 453 | 0 |
'''simple docstring'''
from maths.prime_check import is_prime
def lowerCamelCase__ ( number : int ):
    '''simple docstring'''
    if not isinstance(number , int ):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg )
    if is_prime(number ) and is_prime(number + 2 ):
        return number + 2
    else:
        return -1
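# e.g. lowerCamelCase__(3) returns 5, since 3 and 5 are twin primes, while lowerCamelCase__(4)
# returns -1 because 4 is not prime.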
if __name__ == "__main__":
import doctest
doctest.testmod()
| 446 |
import cv2 as cva  # OpenCV; aliased so the existing cva.* call sites below keep working
import numpy as np
class HarrisCorner :
    '''simple docstring'''
    def __init__( self : Any , k : float , window_size : int ) -> None:
        """simple docstring"""
        if k in (0.04, 0.06):
            self.k = k
            self.window_size = window_size
        else:
            raise ValueError('''invalid k value''' )
    def __str__( self : Tuple ) -> str:
        """simple docstring"""
        return str(self.k )
    def detect( self : List[str] , img_path : str ) -> tuple[cva.Mat, list[list[int]]]:
        """simple docstring"""
        img = cva.imread(img_path , 0 )
        h, w = img.shape
        corner_list = []
        color_img = img.copy()
        color_img = cva.cvtColor(color_img , cva.COLOR_GRAY2RGB )
        dy, dx = np.gradient(img )
        ixx = dx**2
        iyy = dy**2
        ixy = dx * dy
        k = self.k  # sensitivity constant validated in __init__ (0.04 or 0.06)
        offset = self.window_size // 2
        for y in range(offset , h - offset ):
            for x in range(offset , w - offset ):
                wxx = ixx[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wyy = iyy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wxy = ixy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                det = (wxx * wyy) - (wxy**2)
                trace = wxx + wyy
                r = det - k * (trace**2)
                # Can change the value
                if r > 0.5:
                    corner_list.append([x, y, r] )
                    color_img.itemset((y, x, 0) , 0 )
                    color_img.itemset((y, x, 1) , 0 )
                    color_img.itemset((y, x, 2) , 255 )
        return color_img, corner_list
if __name__ == "__main__":
A_ = HarrisCorner(0.04, 3)
A_ ,A_ = edge_detect.detect("path_to_image")
cva.imwrite("detect.png", color_img)
| 393 | 0 |
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
from .unet_ad_blocks import get_down_block, get_mid_block, get_out_block, get_up_block
@dataclass
class UpperCAmelCase ( __snake_case ):
a: torch.FloatTensor
class UpperCAmelCase ( __snake_case , __snake_case ):
@register_to_config
def __init__( self: str , __UpperCamelCase: int = 6_5536 , __UpperCamelCase: Optional[int] = None , __UpperCamelCase: int = 2 , __UpperCamelCase: int = 2 , __UpperCamelCase: int = 0 , __UpperCamelCase: str = "fourier" , __UpperCamelCase: bool = True , __UpperCamelCase: bool = False , __UpperCamelCase: float = 0.0 , __UpperCamelCase: Tuple[str] = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D") , __UpperCamelCase: Tuple[str] = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip") , __UpperCamelCase: Tuple[str] = "UNetMidBlock1D" , __UpperCamelCase: str = None , __UpperCamelCase: Tuple[int] = (32, 32, 64) , __UpperCamelCase: str = None , __UpperCamelCase: int = 8 , __UpperCamelCase: int = 1 , __UpperCamelCase: bool = False , ):
super().__init__()
_a = sample_size
# time
if time_embedding_type == "fourier":
_a = GaussianFourierProjection(
embedding_size=8 , set_W_to_weight=__UpperCamelCase , log=__UpperCamelCase , flip_sin_to_cos=__UpperCamelCase )
_a = 2 * block_out_channels[0]
elif time_embedding_type == "positional":
_a = Timesteps(
block_out_channels[0] , flip_sin_to_cos=__UpperCamelCase , downscale_freq_shift=__UpperCamelCase )
_a = block_out_channels[0]
if use_timestep_embedding:
_a = block_out_channels[0] * 4
_a = TimestepEmbedding(
in_channels=__UpperCamelCase , time_embed_dim=__UpperCamelCase , act_fn=__UpperCamelCase , out_dim=block_out_channels[0] , )
_a = nn.ModuleList([] )
_a = None
_a = nn.ModuleList([] )
_a = None
# down
_a = in_channels
for i, down_block_type in enumerate(__UpperCamelCase ):
_a = output_channel
_a = block_out_channels[i]
if i == 0:
input_channel += extra_in_channels
_a = i == len(__UpperCamelCase ) - 1
_a = get_down_block(
__UpperCamelCase , num_layers=__UpperCamelCase , in_channels=__UpperCamelCase , out_channels=__UpperCamelCase , temb_channels=block_out_channels[0] , add_downsample=not is_final_block or downsample_each_block , )
self.down_blocks.append(__UpperCamelCase )
# mid
_a = get_mid_block(
__UpperCamelCase , in_channels=block_out_channels[-1] , mid_channels=block_out_channels[-1] , out_channels=block_out_channels[-1] , embed_dim=block_out_channels[0] , num_layers=__UpperCamelCase , add_downsample=__UpperCamelCase , )
# up
_a = list(reversed(__UpperCamelCase ) )
_a = reversed_block_out_channels[0]
if out_block_type is None:
_a = out_channels
else:
_a = block_out_channels[0]
for i, up_block_type in enumerate(__UpperCamelCase ):
_a = output_channel
_a = (
reversed_block_out_channels[i + 1] if i < len(__UpperCamelCase ) - 1 else final_upsample_channels
)
_a = i == len(__UpperCamelCase ) - 1
_a = get_up_block(
__UpperCamelCase , num_layers=__UpperCamelCase , in_channels=__UpperCamelCase , out_channels=__UpperCamelCase , temb_channels=block_out_channels[0] , add_upsample=not is_final_block , )
self.up_blocks.append(__UpperCamelCase )
_a = output_channel
# out
_a = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4 , 32 )
_a = get_out_block(
out_block_type=__UpperCamelCase , num_groups_out=__UpperCamelCase , embed_dim=block_out_channels[0] , out_channels=__UpperCamelCase , act_fn=__UpperCamelCase , fc_dim=block_out_channels[-1] // 4 , )
def _A ( self: List[Any] , __UpperCamelCase: torch.FloatTensor , __UpperCamelCase: Union[torch.Tensor, float, int] , __UpperCamelCase: bool = True , ):
_a = timestep
if not torch.is_tensor(__UpperCamelCase ):
_a = torch.tensor([timesteps] , dtype=torch.long , device=sample.device )
elif torch.is_tensor(__UpperCamelCase ) and len(timesteps.shape ) == 0:
_a = timesteps[None].to(sample.device )
_a = self.time_proj(__UpperCamelCase )
if self.config.use_timestep_embedding:
_a = self.time_mlp(__UpperCamelCase )
else:
_a = timestep_embed[..., None]
_a = timestep_embed.repeat([1, 1, sample.shape[2]] ).to(sample.dtype )
_a = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]) )
# 2. down
_a = ()
for downsample_block in self.down_blocks:
_a , _a = downsample_block(hidden_states=__UpperCamelCase , temb=__UpperCamelCase )
down_block_res_samples += res_samples
# 3. mid
if self.mid_block:
_a = self.mid_block(__UpperCamelCase , __UpperCamelCase )
# 4. up
for i, upsample_block in enumerate(self.up_blocks ):
_a = down_block_res_samples[-1:]
_a = down_block_res_samples[:-1]
_a = upsample_block(__UpperCamelCase , res_hidden_states_tuple=__UpperCamelCase , temb=__UpperCamelCase )
# 5. post-process
if self.out_block:
_a = self.out_block(__UpperCamelCase , __UpperCamelCase )
if not return_dict:
return (sample,)
return UNetaDOutput(sample=__UpperCamelCase )
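# Note on the forward contract above: `timestep` may be a Python scalar or a tensor and,
# without the timestep MLP, is broadcast across the sample's last (length) dimension;
# with return_dict=False the forward pass returns a plain (sample,) tuple instead of
# the UNetaDOutput dataclass.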
| 346 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase :List[str] = logging.get_logger(__name__)
lowerCamelCase :List[str] = {}
class UpperCAmelCase ( __snake_case ):
a: str = "llama"
a: List[str] = ["past_key_values"]
def __init__( self: Tuple , __UpperCamelCase: Optional[Any]=3_2000 , __UpperCamelCase: Optional[int]=4096 , __UpperCamelCase: Union[str, Any]=1_1008 , __UpperCamelCase: str=32 , __UpperCamelCase: List[str]=32 , __UpperCamelCase: Tuple=None , __UpperCamelCase: Dict="silu" , __UpperCamelCase: Any=2048 , __UpperCamelCase: Optional[int]=0.0_2 , __UpperCamelCase: int=1E-6 , __UpperCamelCase: List[Any]=True , __UpperCamelCase: List[str]=0 , __UpperCamelCase: Union[str, Any]=1 , __UpperCamelCase: str=2 , __UpperCamelCase: int=1 , __UpperCamelCase: Optional[Any]=False , __UpperCamelCase: int=None , **__UpperCamelCase: Optional[int] , ):
_a = vocab_size
_a = max_position_embeddings
_a = hidden_size
_a = intermediate_size
_a = num_hidden_layers
_a = num_attention_heads
# for backward compatibility
if num_key_value_heads is None:
_a = num_attention_heads
_a = num_key_value_heads
_a = hidden_act
_a = initializer_range
_a = rms_norm_eps
_a = pretraining_tp
_a = use_cache
_a = rope_scaling
self._rope_scaling_validation()
super().__init__(
pad_token_id=__UpperCamelCase , bos_token_id=__UpperCamelCase , eos_token_id=__UpperCamelCase , tie_word_embeddings=__UpperCamelCase , **__UpperCamelCase , )
def _A ( self: Any ):
if self.rope_scaling is None:
return
        if not isinstance(self.rope_scaling , dict ) or len(self.rope_scaling ) != 2:
            raise ValueError(
                '''`rope_scaling` must be a dictionary with two fields, `type` and `factor`, '''
                f"got {self.rope_scaling}" )
        rope_scaling_type = self.rope_scaling.get('''type''' , None )
        rope_scaling_factor = self.rope_scaling.get('''factor''' , None )
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}" )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor , float ) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}" )
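# A minimal sketch of how the validation above is exercised (using this file's class name):
#   cfg = UpperCAmelCase(rope_scaling={"type": "linear", "factor": 2.0})  # accepted
#   cfg = UpperCAmelCase(rope_scaling={"type": "linear", "factor": 0.5})  # raises ValueError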
| 346 | 1 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
expand_dims,
flatten_dict,
is_flax_available,
is_tf_available,
is_torch_available,
reshape,
squeeze,
transpose,
)
if is_flax_available():
import jax.numpy as jnp
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
class lowerCAmelCase ( unittest.TestCase):
def lowerCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
        input_dict = {
'''task_specific_params''': {
'''summarization''': {'''length_penalty''': 1.0, '''max_length''': 128, '''min_length''': 12, '''num_beams''': 4},
'''summarization_cnn''': {'''length_penalty''': 2.0, '''max_length''': 142, '''min_length''': 56, '''num_beams''': 4},
'''summarization_xsum''': {'''length_penalty''': 1.0, '''max_length''': 62, '''min_length''': 11, '''num_beams''': 6},
}
}
        expected_dict = {
'''task_specific_params.summarization.length_penalty''': 1.0,
'''task_specific_params.summarization.max_length''': 128,
'''task_specific_params.summarization.min_length''': 12,
'''task_specific_params.summarization.num_beams''': 4,
'''task_specific_params.summarization_cnn.length_penalty''': 2.0,
'''task_specific_params.summarization_cnn.max_length''': 142,
'''task_specific_params.summarization_cnn.min_length''': 56,
'''task_specific_params.summarization_cnn.num_beams''': 4,
'''task_specific_params.summarization_xsum.length_penalty''': 1.0,
'''task_specific_params.summarization_xsum.max_length''': 62,
'''task_specific_params.summarization_xsum.min_length''': 11,
'''task_specific_params.summarization_xsum.num_beams''': 6,
}
        self.assertEqual(flatten_dict(input_dict ) , expected_dict )
def lowerCAmelCase ( self ) -> Dict:
'''simple docstring'''
        x = np.random.randn(3 , 4 )
        self.assertTrue(np.allclose(transpose(x ) , x.transpose() ) )
        x = np.random.randn(3 , 4 , 5 )
        self.assertTrue(np.allclose(transpose(x , axes=(1, 2, 0) ) , x.transpose((1, 2, 0) ) ) )
@require_torch
def lowerCAmelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
__snake_case = np.random.randn(3 , 4 )
__snake_case = torch.tensor(__SCREAMING_SNAKE_CASE )
self.assertTrue(np.allclose(transpose(__SCREAMING_SNAKE_CASE ) , transpose(__SCREAMING_SNAKE_CASE ).numpy() ) )
__snake_case = np.random.randn(3 , 4 , 5 )
__snake_case = torch.tensor(__SCREAMING_SNAKE_CASE )
self.assertTrue(np.allclose(transpose(__SCREAMING_SNAKE_CASE , axes=(1, 2, 0) ) , transpose(__SCREAMING_SNAKE_CASE , axes=(1, 2, 0) ).numpy() ) )
@require_tf
def lowerCAmelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
__snake_case = np.random.randn(3 , 4 )
__snake_case = tf.constant(__SCREAMING_SNAKE_CASE )
self.assertTrue(np.allclose(transpose(__SCREAMING_SNAKE_CASE ) , transpose(__SCREAMING_SNAKE_CASE ).numpy() ) )
__snake_case = np.random.randn(3 , 4 , 5 )
__snake_case = tf.constant(__SCREAMING_SNAKE_CASE )
self.assertTrue(np.allclose(transpose(__SCREAMING_SNAKE_CASE , axes=(1, 2, 0) ) , transpose(__SCREAMING_SNAKE_CASE , axes=(1, 2, 0) ).numpy() ) )
@require_flax
def lowerCAmelCase ( self ) -> List[str]:
'''simple docstring'''
__snake_case = np.random.randn(3 , 4 )
__snake_case = jnp.array(__SCREAMING_SNAKE_CASE )
self.assertTrue(np.allclose(transpose(__SCREAMING_SNAKE_CASE ) , np.asarray(transpose(__SCREAMING_SNAKE_CASE ) ) ) )
__snake_case = np.random.randn(3 , 4 , 5 )
__snake_case = jnp.array(__SCREAMING_SNAKE_CASE )
self.assertTrue(np.allclose(transpose(__SCREAMING_SNAKE_CASE , axes=(1, 2, 0) ) , np.asarray(transpose(__SCREAMING_SNAKE_CASE , axes=(1, 2, 0) ) ) ) )
def lowerCAmelCase ( self ) -> str:
'''simple docstring'''
        x = np.random.randn(3 , 4 )
        self.assertTrue(np.allclose(reshape(x , (4, 3) ) , np.reshape(x , (4, 3) ) ) )
        x = np.random.randn(3 , 4 , 5 )
        self.assertTrue(np.allclose(reshape(x , (12, 5) ) , np.reshape(x , (12, 5) ) ) )
@require_torch
def lowerCAmelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
__snake_case = np.random.randn(3 , 4 )
__snake_case = torch.tensor(__SCREAMING_SNAKE_CASE )
self.assertTrue(np.allclose(reshape(__SCREAMING_SNAKE_CASE , (4, 3) ) , reshape(__SCREAMING_SNAKE_CASE , (4, 3) ).numpy() ) )
__snake_case = np.random.randn(3 , 4 , 5 )
__snake_case = torch.tensor(__SCREAMING_SNAKE_CASE )
self.assertTrue(np.allclose(reshape(__SCREAMING_SNAKE_CASE , (12, 5) ) , reshape(__SCREAMING_SNAKE_CASE , (12, 5) ).numpy() ) )
@require_tf
def lowerCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
__snake_case = np.random.randn(3 , 4 )
__snake_case = tf.constant(__SCREAMING_SNAKE_CASE )
self.assertTrue(np.allclose(reshape(__SCREAMING_SNAKE_CASE , (4, 3) ) , reshape(__SCREAMING_SNAKE_CASE , (4, 3) ).numpy() ) )
__snake_case = np.random.randn(3 , 4 , 5 )
__snake_case = tf.constant(__SCREAMING_SNAKE_CASE )
self.assertTrue(np.allclose(reshape(__SCREAMING_SNAKE_CASE , (12, 5) ) , reshape(__SCREAMING_SNAKE_CASE , (12, 5) ).numpy() ) )
@require_flax
def lowerCAmelCase ( self ) -> List[str]:
'''simple docstring'''
__snake_case = np.random.randn(3 , 4 )
__snake_case = jnp.array(__SCREAMING_SNAKE_CASE )
self.assertTrue(np.allclose(reshape(__SCREAMING_SNAKE_CASE , (4, 3) ) , np.asarray(reshape(__SCREAMING_SNAKE_CASE , (4, 3) ) ) ) )
__snake_case = np.random.randn(3 , 4 , 5 )
__snake_case = jnp.array(__SCREAMING_SNAKE_CASE )
self.assertTrue(np.allclose(reshape(__SCREAMING_SNAKE_CASE , (12, 5) ) , np.asarray(reshape(__SCREAMING_SNAKE_CASE , (12, 5) ) ) ) )
def lowerCAmelCase ( self ) -> List[str]:
'''simple docstring'''
        x = np.random.randn(1 , 3 , 4 )
        self.assertTrue(np.allclose(squeeze(x ) , np.squeeze(x ) ) )
        x = np.random.randn(1 , 4 , 1 , 5 )
        self.assertTrue(np.allclose(squeeze(x , axis=2 ) , np.squeeze(x , axis=2 ) ) )
@require_torch
def lowerCAmelCase ( self ) -> List[str]:
'''simple docstring'''
__snake_case = np.random.randn(1 , 3 , 4 )
__snake_case = torch.tensor(__SCREAMING_SNAKE_CASE )
self.assertTrue(np.allclose(squeeze(__SCREAMING_SNAKE_CASE ) , squeeze(__SCREAMING_SNAKE_CASE ).numpy() ) )
__snake_case = np.random.randn(1 , 4 , 1 , 5 )
__snake_case = torch.tensor(__SCREAMING_SNAKE_CASE )
self.assertTrue(np.allclose(squeeze(__SCREAMING_SNAKE_CASE , axis=2 ) , squeeze(__SCREAMING_SNAKE_CASE , axis=2 ).numpy() ) )
@require_tf
def lowerCAmelCase ( self ) -> Any:
'''simple docstring'''
__snake_case = np.random.randn(1 , 3 , 4 )
__snake_case = tf.constant(__SCREAMING_SNAKE_CASE )
self.assertTrue(np.allclose(squeeze(__SCREAMING_SNAKE_CASE ) , squeeze(__SCREAMING_SNAKE_CASE ).numpy() ) )
__snake_case = np.random.randn(1 , 4 , 1 , 5 )
__snake_case = tf.constant(__SCREAMING_SNAKE_CASE )
self.assertTrue(np.allclose(squeeze(__SCREAMING_SNAKE_CASE , axis=2 ) , squeeze(__SCREAMING_SNAKE_CASE , axis=2 ).numpy() ) )
@require_flax
def lowerCAmelCase ( self ) -> List[str]:
'''simple docstring'''
__snake_case = np.random.randn(1 , 3 , 4 )
__snake_case = jnp.array(__SCREAMING_SNAKE_CASE )
self.assertTrue(np.allclose(squeeze(__SCREAMING_SNAKE_CASE ) , np.asarray(squeeze(__SCREAMING_SNAKE_CASE ) ) ) )
__snake_case = np.random.randn(1 , 4 , 1 , 5 )
__snake_case = jnp.array(__SCREAMING_SNAKE_CASE )
self.assertTrue(np.allclose(squeeze(__SCREAMING_SNAKE_CASE , axis=2 ) , np.asarray(squeeze(__SCREAMING_SNAKE_CASE , axis=2 ) ) ) )
def lowerCAmelCase ( self ) -> Any:
'''simple docstring'''
        x = np.random.randn(3 , 4 )
        self.assertTrue(np.allclose(expand_dims(x , axis=1 ) , np.expand_dims(x , axis=1 ) ) )
@require_torch
def lowerCAmelCase ( self ) -> int:
'''simple docstring'''
__snake_case = np.random.randn(3 , 4 )
__snake_case = torch.tensor(__SCREAMING_SNAKE_CASE )
self.assertTrue(np.allclose(expand_dims(__SCREAMING_SNAKE_CASE , axis=1 ) , expand_dims(__SCREAMING_SNAKE_CASE , axis=1 ).numpy() ) )
@require_tf
def lowerCAmelCase ( self ) -> int:
'''simple docstring'''
__snake_case = np.random.randn(3 , 4 )
__snake_case = tf.constant(__SCREAMING_SNAKE_CASE )
self.assertTrue(np.allclose(expand_dims(__SCREAMING_SNAKE_CASE , axis=1 ) , expand_dims(__SCREAMING_SNAKE_CASE , axis=1 ).numpy() ) )
@require_flax
def lowerCAmelCase ( self ) -> List[str]:
'''simple docstring'''
__snake_case = np.random.randn(3 , 4 )
__snake_case = jnp.array(__SCREAMING_SNAKE_CASE )
self.assertTrue(np.allclose(expand_dims(__SCREAMING_SNAKE_CASE , axis=1 ) , np.asarray(expand_dims(__SCREAMING_SNAKE_CASE , axis=1 ) ) ) )
| 24 |
def gcd( a : int , b : int ) -> int:
    '''simple docstring'''
    while a != 0:
        a , b = b % a, a
    return b


def mod_inverse( a : int , m : int ) -> int:
    '''simple docstring'''
    if gcd(a , m ) != 1:
        msg = f'''mod inverse of {a!r} and {m!r} does not exist'''
        raise ValueError(msg )
    u1 , u2 , u3 = 1, 0, a
    v1 , v2 , v3 = 0, 1, m
    while v3 != 0:
        q = u3 // v3
        v1 , v2 , v3 , u1 , u2 , u3 = (u1 - q * v1), (u2 - q * v2), (u3 - q * v3), v1, v2, v3
    return u1 % m
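
# Quick check (a sketch): 3 * 4 % 11 == 1, so the inverse of 3 modulo 11 is 4.
if __name__ == "__main__":
    print(mod_inverse(3 , 11 ))  # -> 4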
| 30 | 0 |
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
__magic_name__ : Union[str, Any] = get_logger()
__magic_name__ : Optional[dict] = None
class A__ ( TensorFormatter[Mapping, """jax.Array""", Mapping] ):
'''simple docstring'''
def __init__( self : Dict , _SCREAMING_SNAKE_CASE : Dict=None , _SCREAMING_SNAKE_CASE : Optional[Any]=None , **_SCREAMING_SNAKE_CASE : Union[str, Any] ):
"""simple docstring"""
super().__init__(features=_SCREAMING_SNAKE_CASE )
import jax
from jaxlib.xla_client import Device
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
raise ValueError(
f'Expected {device} to be a `str` not {type(_SCREAMING_SNAKE_CASE )}, as `jaxlib.xla_extension.Device` '
'is not serializable neither with `pickle` nor with `dill`. Instead you can surround '
'the device with `str()` to get its string identifier that will be internally mapped '
'to the actual `jaxlib.xla_extension.Device`.' )
UpperCamelCase = device if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) else str(jax.devices()[0] )
# using global variable since `jaxlib.xla_extension.Device` is not serializable neither
# with `pickle` nor with `dill`, so we need to use a global variable instead
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
UpperCamelCase = self._map_devices_to_str()
if self.device not in list(DEVICE_MAPPING.keys() ):
logger.warning(
f'Device with string identifier {self.device} not listed among the available '
f'devices: {list(DEVICE_MAPPING.keys() )}, so falling back to the default '
f'device: {str(jax.devices()[0] )}.' )
UpperCamelCase = str(jax.devices()[0] )
UpperCamelCase = jnp_array_kwargs
@staticmethod
def _SCREAMING_SNAKE_CASE ( ):
"""simple docstring"""
import jax
return {str(_SCREAMING_SNAKE_CASE ): device for device in jax.devices()}
def _SCREAMING_SNAKE_CASE ( self : List[Any] , _SCREAMING_SNAKE_CASE : Optional[int] ):
"""simple docstring"""
import jax
import jax.numpy as jnp
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) and column:
if all(
isinstance(_SCREAMING_SNAKE_CASE , jax.Array ) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column ):
return jnp.stack(_SCREAMING_SNAKE_CASE , axis=0 )
return column
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , _SCREAMING_SNAKE_CASE : List[str] ):
"""simple docstring"""
import jax
import jax.numpy as jnp
if isinstance(_SCREAMING_SNAKE_CASE , (str, bytes, type(_SCREAMING_SNAKE_CASE )) ):
return value
elif isinstance(_SCREAMING_SNAKE_CASE , (np.character, np.ndarray) ) and np.issubdtype(value.dtype , np.character ):
return value.tolist()
UpperCamelCase = {}
if isinstance(_SCREAMING_SNAKE_CASE , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.integer ):
# the default int precision depends on the jax config
# see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
if jax.config.jax_enable_xaa:
UpperCamelCase = {'dtype': jnp.intaa}
else:
UpperCamelCase = {'dtype': jnp.intaa}
elif isinstance(_SCREAMING_SNAKE_CASE , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.floating ):
UpperCamelCase = {'dtype': jnp.floataa}
elif config.PIL_AVAILABLE and "PIL" in sys.modules:
import PIL.Image
if isinstance(_SCREAMING_SNAKE_CASE , PIL.Image.Image ):
UpperCamelCase = np.asarray(_SCREAMING_SNAKE_CASE )
# using global variable since `jaxlib.xla_extension.Device` is not serializable neither
# with `pickle` nor with `dill`, so we need to use a global variable instead
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
UpperCamelCase = self._map_devices_to_str()
with jax.default_device(DEVICE_MAPPING[self.device] ):
# calling jnp.array on a np.ndarray does copy the data
# see https://github.com/google/jax/issues/4486
return jnp.array(_SCREAMING_SNAKE_CASE , **{**default_dtype, **self.jnp_array_kwargs} )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , _SCREAMING_SNAKE_CASE : List[Any] ):
"""simple docstring"""
import jax
# support for torch, tf, jax etc.
if config.TORCH_AVAILABLE and "torch" in sys.modules:
import torch
if isinstance(_SCREAMING_SNAKE_CASE , torch.Tensor ):
return self._tensorize(data_struct.detach().cpu().numpy()[()] )
if hasattr(_SCREAMING_SNAKE_CASE , '__array__' ) and not isinstance(_SCREAMING_SNAKE_CASE , jax.Array ):
UpperCamelCase = data_struct.__array__()
# support for nested types like struct of list of struct
if isinstance(_SCREAMING_SNAKE_CASE , np.ndarray ):
if data_struct.dtype == object: # jax arrays cannot be instantied from an array of objects
return self._consolidate([self.recursive_tensorize(_SCREAMING_SNAKE_CASE ) for substruct in data_struct] )
elif isinstance(_SCREAMING_SNAKE_CASE , (list, tuple) ):
return self._consolidate([self.recursive_tensorize(_SCREAMING_SNAKE_CASE ) for substruct in data_struct] )
return self._tensorize(_SCREAMING_SNAKE_CASE )
def _SCREAMING_SNAKE_CASE ( self : List[Any] , _SCREAMING_SNAKE_CASE : dict ):
"""simple docstring"""
return map_nested(self._recursive_tensorize , _SCREAMING_SNAKE_CASE , map_list=_SCREAMING_SNAKE_CASE )
def _SCREAMING_SNAKE_CASE ( self : Dict , _SCREAMING_SNAKE_CASE : pa.Table ):
"""simple docstring"""
UpperCamelCase = self.numpy_arrow_extractor().extract_row(_SCREAMING_SNAKE_CASE )
UpperCamelCase = self.python_features_decoder.decode_row(_SCREAMING_SNAKE_CASE )
return self.recursive_tensorize(_SCREAMING_SNAKE_CASE )
def _SCREAMING_SNAKE_CASE ( self : str , _SCREAMING_SNAKE_CASE : pa.Table ):
"""simple docstring"""
UpperCamelCase = self.numpy_arrow_extractor().extract_column(_SCREAMING_SNAKE_CASE )
UpperCamelCase = self.python_features_decoder.decode_column(_SCREAMING_SNAKE_CASE , pa_table.column_names[0] )
UpperCamelCase = self.recursive_tensorize(_SCREAMING_SNAKE_CASE )
UpperCamelCase = self._consolidate(_SCREAMING_SNAKE_CASE )
return column
def _SCREAMING_SNAKE_CASE ( self : List[str] , _SCREAMING_SNAKE_CASE : pa.Table ):
"""simple docstring"""
UpperCamelCase = self.numpy_arrow_extractor().extract_batch(_SCREAMING_SNAKE_CASE )
UpperCamelCase = self.python_features_decoder.decode_batch(_SCREAMING_SNAKE_CASE )
UpperCamelCase = self.recursive_tensorize(_SCREAMING_SNAKE_CASE )
for column_name in batch:
UpperCamelCase = self._consolidate(batch[column_name] )
return batch
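# Note: the three pa.Table methods above implement the `datasets` formatter hooks for rows,
# columns, and batches: extract with numpy, decode the python features, recursively
# tensorize to jnp arrays, then consolidate equal-shaped column values by stacking.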
| 410 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__magic_name__ : int = logging.get_logger(__name__)
__magic_name__ : Optional[Any] = {
'''uclanlp/visualbert-vqa''': '''https://huggingface.co/uclanlp/visualbert-vqa/resolve/main/config.json''',
'''uclanlp/visualbert-vqa-pre''': '''https://huggingface.co/uclanlp/visualbert-vqa-pre/resolve/main/config.json''',
'''uclanlp/visualbert-vqa-coco-pre''': (
'''https://huggingface.co/uclanlp/visualbert-vqa-coco-pre/resolve/main/config.json'''
),
'''uclanlp/visualbert-vcr''': '''https://huggingface.co/uclanlp/visualbert-vcr/resolve/main/config.json''',
'''uclanlp/visualbert-vcr-pre''': '''https://huggingface.co/uclanlp/visualbert-vcr-pre/resolve/main/config.json''',
'''uclanlp/visualbert-vcr-coco-pre''': (
'''https://huggingface.co/uclanlp/visualbert-vcr-coco-pre/resolve/main/config.json'''
),
'''uclanlp/visualbert-nlvr2''': '''https://huggingface.co/uclanlp/visualbert-nlvr2/resolve/main/config.json''',
'''uclanlp/visualbert-nlvr2-pre''': '''https://huggingface.co/uclanlp/visualbert-nlvr2-pre/resolve/main/config.json''',
'''uclanlp/visualbert-nlvr2-coco-pre''': (
'''https://huggingface.co/uclanlp/visualbert-nlvr2-coco-pre/resolve/main/config.json'''
)
# See all VisualBERT models at https://huggingface.co/models?filter=visual_bert
}
class A__ ( __snake_case ):
'''simple docstring'''
snake_case__ = """visual_bert"""
def __init__( self : Optional[Any] , _SCREAMING_SNAKE_CASE : Optional[int]=3_0522 , _SCREAMING_SNAKE_CASE : Dict=768 , _SCREAMING_SNAKE_CASE : Tuple=512 , _SCREAMING_SNAKE_CASE : Optional[Any]=12 , _SCREAMING_SNAKE_CASE : Any=12 , _SCREAMING_SNAKE_CASE : Any=3072 , _SCREAMING_SNAKE_CASE : Dict="gelu" , _SCREAMING_SNAKE_CASE : Union[str, Any]=0.1 , _SCREAMING_SNAKE_CASE : Any=0.1 , _SCREAMING_SNAKE_CASE : Optional[int]=512 , _SCREAMING_SNAKE_CASE : List[Any]=2 , _SCREAMING_SNAKE_CASE : str=0.0_2 , _SCREAMING_SNAKE_CASE : Any=1E-1_2 , _SCREAMING_SNAKE_CASE : Any=False , _SCREAMING_SNAKE_CASE : Optional[int]=True , _SCREAMING_SNAKE_CASE : Dict=1 , _SCREAMING_SNAKE_CASE : Optional[int]=0 , _SCREAMING_SNAKE_CASE : str=2 , **_SCREAMING_SNAKE_CASE : Dict , ):
"""simple docstring"""
super().__init__(pad_token_id=_SCREAMING_SNAKE_CASE , bos_token_id=_SCREAMING_SNAKE_CASE , eos_token_id=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
UpperCamelCase = vocab_size
UpperCamelCase = max_position_embeddings
UpperCamelCase = hidden_size
UpperCamelCase = visual_embedding_dim
UpperCamelCase = num_hidden_layers
UpperCamelCase = num_attention_heads
UpperCamelCase = intermediate_size
UpperCamelCase = hidden_act
UpperCamelCase = hidden_dropout_prob
UpperCamelCase = attention_probs_dropout_prob
UpperCamelCase = initializer_range
UpperCamelCase = type_vocab_size
UpperCamelCase = layer_norm_eps
UpperCamelCase = bypass_transformer
UpperCamelCase = special_visual_initialize
| 410 | 1 |
import argparse
from typing import List
import evaluate
import numpy as np
import torch
from datasets import DatasetDict, load_dataset
# New Code #
# We'll be using StratifiedKFold for this example
from sklearn.model_selection import StratifiedKFold
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to perform Cross Validation,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_fold_dataloaders( accelerator : Accelerator , dataset : DatasetDict , train_idxs : List[int] , valid_idxs : List[int] , batch_size : int = 16 ) -> Union[str, Any]:
    '''simple docstring'''
    tokenizer = AutoTokenizer.from_pretrained('bert-base-cased' )
    datasets = DatasetDict(
        {
            'train': dataset['train'].select(train_idxs ),
            'validation': dataset['train'].select(valid_idxs ),
            'test': dataset['validation'],
        } )
def tokenize_function(_UpperCamelCase : int ):
# max_length=None => use the model max length (it's actually the default)
SCREAMING_SNAKE_CASE = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=_UpperCamelCase , max_length=_UpperCamelCase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
SCREAMING_SNAKE_CASE = datasets.map(
_UpperCamelCase , batched=_UpperCamelCase , remove_columns=['idx', 'sentence1', 'sentence2'] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
SCREAMING_SNAKE_CASE = tokenized_datasets.rename_column('label' , 'labels' )
def collate_fn(_UpperCamelCase : Dict ):
# On TPU it's best to pad everything to the same length or training will be very slow.
SCREAMING_SNAKE_CASE = 1_28 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
SCREAMING_SNAKE_CASE = 16
elif accelerator.mixed_precision != "no":
SCREAMING_SNAKE_CASE = 8
else:
SCREAMING_SNAKE_CASE = None
return tokenizer.pad(
_UpperCamelCase , padding='longest' , max_length=_UpperCamelCase , pad_to_multiple_of=_UpperCamelCase , return_tensors='pt' , )
# Instantiate dataloaders.
SCREAMING_SNAKE_CASE = DataLoader(
tokenized_datasets['train'] , shuffle=_UpperCamelCase , collate_fn=_UpperCamelCase , batch_size=_UpperCamelCase )
SCREAMING_SNAKE_CASE = DataLoader(
tokenized_datasets['validation'] , shuffle=_UpperCamelCase , collate_fn=_UpperCamelCase , batch_size=_UpperCamelCase )
SCREAMING_SNAKE_CASE = DataLoader(
tokenized_datasets['test'] , shuffle=_UpperCamelCase , collate_fn=_UpperCamelCase , batch_size=_UpperCamelCase )
return train_dataloader, eval_dataloader, test_dataloader
def training_function( config : Union[str, Any] , args : Optional[Any] ) -> List[Any]:
    '''simple docstring'''
    test_predictions = []
    # Download the dataset
    datasets = load_dataset('glue' , 'mrpc' )
    # Create our splits
    kfold = StratifiedKFold(n_splits=int(args.num_folds ) )
# Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
SCREAMING_SNAKE_CASE = config['lr']
SCREAMING_SNAKE_CASE = int(config['num_epochs'] )
SCREAMING_SNAKE_CASE = int(config['seed'] )
SCREAMING_SNAKE_CASE = int(config['batch_size'] )
SCREAMING_SNAKE_CASE = evaluate.load('glue' , 'mrpc' )
# If the batch size is too big we use gradient accumulation
SCREAMING_SNAKE_CASE = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
SCREAMING_SNAKE_CASE = batch_size // MAX_GPU_BATCH_SIZE
SCREAMING_SNAKE_CASE = MAX_GPU_BATCH_SIZE
set_seed(_UpperCamelCase )
# New Code #
# Create our folds:
    folds = kfold.split(np.zeros(datasets['train'].num_rows ) , datasets['train']['label'] )
    test_references = []
    # Iterate over them
    for i, (train_idxs, valid_idxs) in enumerate(folds ):
        train_dataloader , eval_dataloader , test_dataloader = get_fold_dataloaders(
            accelerator , datasets , train_idxs , valid_idxs , )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
SCREAMING_SNAKE_CASE = AutoModelForSequenceClassification.from_pretrained('bert-base-cased' , return_dict=_UpperCamelCase )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
SCREAMING_SNAKE_CASE = model.to(accelerator.device )
# Instantiate optimizer
SCREAMING_SNAKE_CASE = AdamW(params=model.parameters() , lr=_UpperCamelCase )
# Instantiate scheduler
SCREAMING_SNAKE_CASE = get_linear_schedule_with_warmup(
optimizer=_UpperCamelCase , num_warmup_steps=1_00 , num_training_steps=(len(_UpperCamelCase ) * num_epochs) // gradient_accumulation_steps , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = accelerator.prepare(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# Now we train the model
for epoch in range(_UpperCamelCase ):
model.train()
for step, batch in enumerate(_UpperCamelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
SCREAMING_SNAKE_CASE = model(**_UpperCamelCase )
SCREAMING_SNAKE_CASE = outputs.loss
SCREAMING_SNAKE_CASE = loss / gradient_accumulation_steps
accelerator.backward(_UpperCamelCase )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(_UpperCamelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
SCREAMING_SNAKE_CASE = model(**_UpperCamelCase )
SCREAMING_SNAKE_CASE = outputs.logits.argmax(dim=-1 )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = accelerator.gather_for_metrics((predictions, batch['labels']) )
metric.add_batch(
predictions=_UpperCamelCase , references=_UpperCamelCase , )
SCREAMING_SNAKE_CASE = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f"""epoch {epoch}:""" , _UpperCamelCase )
# New Code #
# We also run predictions on the test set at the very end
        fold_predictions = []
for step, batch in enumerate(_UpperCamelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
SCREAMING_SNAKE_CASE = model(**_UpperCamelCase )
SCREAMING_SNAKE_CASE = outputs.logits
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = accelerator.gather_for_metrics((predictions, batch['labels']) )
fold_predictions.append(predictions.cpu() )
if i == 0:
# We need all of the test predictions
test_references.append(references.cpu() )
# Use accelerator.print to print only on the main process.
test_predictions.append(torch.cat(_UpperCamelCase , dim=0 ) )
# We now need to release all our memory and get rid of the current model, optimizer, etc
accelerator.free_memory()
# New Code #
# Finally we check the accuracy of our folded results:
SCREAMING_SNAKE_CASE = torch.cat(_UpperCamelCase , dim=0 )
SCREAMING_SNAKE_CASE = torch.stack(_UpperCamelCase , dim=0 ).sum(dim=0 ).div(int(args.num_folds ) ).argmax(dim=-1 )
SCREAMING_SNAKE_CASE = metric.compute(predictions=_UpperCamelCase , references=_UpperCamelCase )
accelerator.print('Average test metrics from all folds:' , _UpperCamelCase )
def main() -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = argparse.ArgumentParser(description='Simple example of training script.' )
parser.add_argument(
        '--mixed_precision' , type=str , default=None , choices=['no', 'fp16', 'bf16', 'fp8'] , help='Whether to use mixed precision. Choose '
        'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10 '
        'and an Nvidia Ampere GPU.' , )
parser.add_argument('--cpu' , action='store_true' , help='If passed, will train on the CPU.' )
# New Code #
    parser.add_argument('--num_folds' , type=int , default=3 , help='The number of splits to perform across the dataset' )
    args = parser.parse_args()
    config = {'lr': 2e-5, 'num_epochs': 3, 'seed': 42, 'batch_size': 16}
    training_function(config , args )
if __name__ == "__main__":
main()
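# Example invocation (a sketch; the script filename is a placeholder):
#   accelerate launch cross_validation.py --num_folds 5 --mixed_precision fp16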
| 439 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_rembert import RemBertTokenizer
else:
a_ : int = None
a_ : List[Any] = logging.get_logger(__name__)
a_ : List[Any] = {"vocab_file": "sentencepiece.model", "tokenizer_file": "tokenizer.json"}
a_ : Optional[int] = {
"vocab_file": {
"google/rembert": "https://huggingface.co/google/rembert/resolve/main/sentencepiece.model",
},
"tokenizer_file": {
"google/rembert": "https://huggingface.co/google/rembert/resolve/main/tokenizer.json",
},
}
a_ : Union[str, Any] = {
"google/rembert": 256,
}
a_ : List[Any] = "▁"
class UpperCamelCase ( SCREAMING_SNAKE_CASE ):
__UpperCamelCase =VOCAB_FILES_NAMES
__UpperCamelCase =PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase =RemBertTokenizer
def __init__( self : int , snake_case__ : Any=None , snake_case__ : Optional[Any]=None , snake_case__ : Optional[int]=True , snake_case__ : Any=True , snake_case__ : Any=False , snake_case__ : Dict="[CLS]" , snake_case__ : List[str]="[SEP]" , snake_case__ : List[str]="<unk>" , snake_case__ : Union[str, Any]="[SEP]" , snake_case__ : List[Any]="<pad>" , snake_case__ : Tuple="[CLS]" , snake_case__ : str="[MASK]" , **snake_case__ : Union[str, Any] , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = AddedToken(snake_case__ , lstrip=snake_case__ , rstrip=snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else mask_token
super().__init__(
snake_case__ , tokenizer_file=snake_case__ , do_lower_case=snake_case__ , remove_space=snake_case__ , keep_accents=snake_case__ , bos_token=snake_case__ , eos_token=snake_case__ , unk_token=snake_case__ , sep_token=snake_case__ , pad_token=snake_case__ , cls_token=snake_case__ , mask_token=snake_case__ , **snake_case__ , )
SCREAMING_SNAKE_CASE = do_lower_case
SCREAMING_SNAKE_CASE = remove_space
SCREAMING_SNAKE_CASE = keep_accents
SCREAMING_SNAKE_CASE = vocab_file
SCREAMING_SNAKE_CASE = False if not self.vocab_file else True
def UpperCamelCase ( self : Tuple , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = [self.sep_token_id]
SCREAMING_SNAKE_CASE = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def UpperCamelCase ( self : Tuple , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None , snake_case__ : bool = False ):
"""simple docstring"""
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'You should not supply a second sequence if the provided sequence of '
'ids is already formatted with special tokens for the model.' )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(snake_case__ )) + [1] + ([0] * len(snake_case__ )) + [1]
return [1] + ([0] * len(snake_case__ )) + [1]
def UpperCamelCase ( self : int , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = [self.sep_token_id]
SCREAMING_SNAKE_CASE = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def UpperCamelCase ( self : Dict , snake_case__ : str , snake_case__ : Optional[str] = None ):
"""simple docstring"""
if not os.path.isdir(snake_case__ ):
logger.error('Vocabulary path ({}) should be a directory'.format(snake_case__ ) )
return
SCREAMING_SNAKE_CASE = os.path.join(
snake_case__ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case__ ):
copyfile(self.vocab_file , snake_case__ )
return (out_vocab_file,)
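# Note: save_vocabulary above only copies the sentencepiece model when the destination
# differs from self.vocab_file, and returns the written path as a 1-tuple.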
| 439 | 1 |
'''simple docstring'''
from __future__ import annotations

import math


def ucal( u : float , p : int ) -> float:
    '''simple docstring'''
    temp = u
    for i in range(1 , p ):
        temp = temp * (u - i)
    return temp


def main() -> None:
    '''simple docstring'''
    n = int(input("enter the numbers of values: " ) )
    y: list[list[float]] = []
    for _ in range(n ):
        y.append([] )
    for i in range(n ):
        for j in range(n ):
            y[i].append(j )
            y[i][j] = 0
    print("enter the values of parameters in a list: " )
    x = list(map(float , input().split() ) )
    print("enter the values of corresponding parameters: " )
    for i in range(n ):
        y[i][0] = float(input() )
    value = int(input("enter the value to interpolate: " ) )
    u = (value - x[0]) / (x[1] - x[0])
    # for calculating forward difference table
    for i in range(1 , n ):
        for j in range(n - i ):
            y[j][i] = y[j + 1][i - 1] - y[j][i - 1]
    summ = y[0][0]
    for i in range(1 , n ):
        summ += (ucal(u , i ) * y[0][i]) / math.factorial(i )
    print(f'''the value at {value} is {summ}''' )


if __name__ == "__main__":
    main()
| 720 |
'''simple docstring'''


def count_divisors( n : int ) -> int:
    '''simple docstring'''
    n_divisors = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        n_divisors *= multiplicity + 1
        i += 1
    if n > 1:
        n_divisors *= 2
    return n_divisors


def solution() -> int:
    '''simple docstring'''
    t_num = 1
    i = 1
    while True:
        i += 1
        t_num += i
        if count_divisors(t_num ) > 5_00:
            break
    return t_num
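# Quick check (a sketch): count_divisors(28) == 6 (divisors 1, 2, 4, 7, 14, 28), and
# solution() returns 76576500, the first triangular number with more than 500 divisors
# (Project Euler problem 12).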
if __name__ == "__main__":
    print(solution() )
| 39 | 0 |
'''simple docstring'''
import inspect
import jax
import jax.lax as lax
import jax.numpy as jnp
from ..utils import add_start_docstrings
from ..utils.logging import get_logger
_A = get_logger(__name__)
_A = R"""\n Args:\n input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n scores (`jnp.ndarray` of shape `(batch_size, config.vocab_size)`):\n Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam\n search or log softmax for each vocabulary token when using beam search\n kwargs (`Dict[str, Any]`, *optional*):\n Additional logits processor specific kwargs.\n\n Return:\n `jnp.ndarray` of shape `(batch_size, config.vocab_size)`: The processed prediction scores.\n\n"""
class SCREAMING_SNAKE_CASE_ :
@add_start_docstrings(UpperCAmelCase__ )
def __call__( self , lowercase , lowercase ) -> jnp.ndarray:
'''simple docstring'''
raise NotImplementedError(
f"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" )
class SCREAMING_SNAKE_CASE_ :
@add_start_docstrings(UpperCAmelCase__ )
def __call__( self , lowercase , lowercase ) -> jnp.ndarray:
'''simple docstring'''
raise NotImplementedError(
f"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" )
class SCREAMING_SNAKE_CASE_ ( snake_case_ ):
@add_start_docstrings(UpperCAmelCase__ )
def __call__( self , lowercase , lowercase , lowercase , **lowercase ) -> jnp.ndarray:
'''simple docstring'''
for processor in self:
__SCREAMING_SNAKE_CASE : Any = inspect.signature(processor.__call__ ).parameters
if len(UpperCAmelCase__ ) > 3:
if not all(arg in kwargs for arg in list(function_args.keys() )[2:] ):
raise ValueError(
f"""Make sure that all the required parameters: {list(function_args.keys() )} for """
f"""{processor.__class__} are passed to the logits processor.""" )
__SCREAMING_SNAKE_CASE : Optional[int] = processor(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__ )
else:
__SCREAMING_SNAKE_CASE : List[str] = processor(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
return scores
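# Note: every warper/processor below follows the same functional interface,
# new_scores = processor(input_ids, scores, cur_len); the list class above forwards extra
# kwargs only to processors whose __call__ accepts more than three parameters.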
class SCREAMING_SNAKE_CASE_ ( snake_case_ ):
def __init__( self , lowercase ) -> Dict:
'''simple docstring'''
if not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) or not (temperature > 0):
raise ValueError(f"""`temperature` has to be a strictly positive float, but is {temperature}""" )
__SCREAMING_SNAKE_CASE : List[Any] = temperature
def __call__( self , lowercase , lowercase , lowercase ) -> jnp.ndarray:
'''simple docstring'''
__SCREAMING_SNAKE_CASE : List[str] = scores / self.temperature
return scores
class SCREAMING_SNAKE_CASE_ ( snake_case_ ):
def __init__( self , lowercase , lowercase = -float('''Inf''' ) , lowercase = 1 ) -> int:
'''simple docstring'''
if not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) or (top_p < 0 or top_p > 1.0):
raise ValueError(f"""`top_p` has to be a float > 0 and < 1, but is {top_p}""" )
if not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) or (min_tokens_to_keep < 1):
raise ValueError(f"""`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}""" )
__SCREAMING_SNAKE_CASE : Any = top_p
__SCREAMING_SNAKE_CASE : Optional[int] = filter_value
__SCREAMING_SNAKE_CASE : int = min_tokens_to_keep
def __call__( self , lowercase , lowercase , lowercase ) -> jnp.ndarray:
'''simple docstring'''
__SCREAMING_SNAKE_CASE : Optional[Any] = lax.top_k(UpperCAmelCase__ , scores.shape[-1] )
__SCREAMING_SNAKE_CASE : Optional[int] = jnp.full_like(UpperCAmelCase__ , self.filter_value )
__SCREAMING_SNAKE_CASE : List[Any] = jax.nn.softmax(UpperCAmelCase__ , axis=-1 ).cumsum(axis=-1 )
__SCREAMING_SNAKE_CASE : Union[str, Any] = cumulative_probs < self.top_p
# include the token that is higher than top_p as well
__SCREAMING_SNAKE_CASE : Tuple = jnp.roll(UpperCAmelCase__ , 1 )
score_mask |= score_mask.at[:, 0].set(UpperCAmelCase__ )
# min tokens to keep
__SCREAMING_SNAKE_CASE : Optional[Any] = score_mask.at[:, : self.min_tokens_to_keep].set(UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE : Tuple = jnp.where(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE : str = jax.lax.sort_key_val(UpperCAmelCase__ , UpperCAmelCase__ )[-1]
return next_scores
class SCREAMING_SNAKE_CASE_ ( snake_case_ ):
def __init__( self , lowercase , lowercase = -float('''Inf''' ) , lowercase = 1 ) -> Optional[Any]:
'''simple docstring'''
if not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) or top_k <= 0:
raise ValueError(f"""`top_k` has to be a strictly positive integer, but is {top_k}""" )
__SCREAMING_SNAKE_CASE : List[Any] = max(UpperCAmelCase__ , UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE : Dict = filter_value
def __call__( self , lowercase , lowercase , lowercase ) -> jnp.ndarray:
'''simple docstring'''
__SCREAMING_SNAKE_CASE : Any = scores.shape
__SCREAMING_SNAKE_CASE : int = jnp.full(batch_size * vocab_size , self.filter_value )
__SCREAMING_SNAKE_CASE : Optional[Any] = min(self.top_k , scores.shape[-1] ) # Safety check
__SCREAMING_SNAKE_CASE : Tuple = lax.top_k(UpperCAmelCase__ , UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[str] = jnp.broadcast_to((jnp.arange(UpperCAmelCase__ ) * vocab_size)[:, None] , (batch_size, topk) ).flatten()
__SCREAMING_SNAKE_CASE : Optional[int] = topk_scores.flatten()
__SCREAMING_SNAKE_CASE : Optional[int] = topk_indices.flatten() + shift
__SCREAMING_SNAKE_CASE : Any = next_scores_flat.at[topk_indices_flat].set(UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[Any] = next_scores_flat.reshape(UpperCAmelCase__ , UpperCAmelCase__ )
return next_scores
class SCREAMING_SNAKE_CASE_ ( snake_case_ ):
def __init__( self , lowercase ) -> str:
'''simple docstring'''
__SCREAMING_SNAKE_CASE : List[Any] = bos_token_id
def __call__( self , lowercase , lowercase , lowercase ) -> jnp.ndarray:
'''simple docstring'''
__SCREAMING_SNAKE_CASE : Union[str, Any] = jnp.full(scores.shape , -float('''inf''' ) )
__SCREAMING_SNAKE_CASE : Optional[int] = 1 - jnp.bool_(cur_len - 1 )
__SCREAMING_SNAKE_CASE : Optional[Any] = jnp.where(UpperCAmelCase__ , new_scores.at[:, self.bos_token_id].set(0 ) , UpperCAmelCase__ )
return scores
class SCREAMING_SNAKE_CASE_ ( snake_case_ ):
def __init__( self , lowercase , lowercase ) -> List[Any]:
'''simple docstring'''
__SCREAMING_SNAKE_CASE : int = max_length
__SCREAMING_SNAKE_CASE : List[Any] = eos_token_id
def __call__( self , lowercase , lowercase , lowercase ) -> jnp.ndarray:
'''simple docstring'''
__SCREAMING_SNAKE_CASE : List[Any] = jnp.full(scores.shape , -float('''inf''' ) )
__SCREAMING_SNAKE_CASE : Optional[int] = 1 - jnp.bool_(cur_len - self.max_length + 1 )
__SCREAMING_SNAKE_CASE : Optional[Any] = jnp.where(UpperCAmelCase__ , new_scores.at[:, self.eos_token_id].set(0 ) , UpperCAmelCase__ )
return scores
class SCREAMING_SNAKE_CASE_ ( snake_case_ ):
def __init__( self , lowercase , lowercase ) -> int:
'''simple docstring'''
if not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) or min_length < 0:
raise ValueError(f"""`min_length` has to be a positive integer, but is {min_length}""" )
if not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) or eos_token_id < 0:
raise ValueError(f"""`eos_token_id` has to be a positive integer, but is {eos_token_id}""" )
__SCREAMING_SNAKE_CASE : List[Any] = min_length
__SCREAMING_SNAKE_CASE : List[str] = eos_token_id
def __call__( self , lowercase , lowercase , lowercase ) -> jnp.ndarray:
'''simple docstring'''
__SCREAMING_SNAKE_CASE : List[str] = 1 - jnp.clip(cur_len - self.min_length , 0 , 1 )
__SCREAMING_SNAKE_CASE : Optional[Any] = jnp.where(UpperCAmelCase__ , scores.at[:, self.eos_token_id].set(-float('''inf''' ) ) , UpperCAmelCase__ )
return scores
class SCREAMING_SNAKE_CASE_ ( snake_case_ ):
def __init__( self , lowercase , lowercase ) -> str:
'''simple docstring'''
__SCREAMING_SNAKE_CASE : Dict = list(UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE : Tuple = begin_index
def __call__( self , lowercase , lowercase , lowercase ) -> List[str]:
'''simple docstring'''
__SCREAMING_SNAKE_CASE : int = 1 - jnp.bool_(cur_len - self.begin_index )
__SCREAMING_SNAKE_CASE : Union[str, Any] = jnp.where(UpperCAmelCase__ , scores.at[:, self.begin_suppress_tokens].set(-float('''inf''' ) ) , UpperCAmelCase__ )
return scores
class SCREAMING_SNAKE_CASE_ ( snake_case_ ):
def __init__( self , lowercase ) -> Union[str, Any]:
'''simple docstring'''
__SCREAMING_SNAKE_CASE : Optional[int] = list(UpperCAmelCase__ )
def __call__( self , lowercase , lowercase , lowercase ) -> jnp.ndarray:
'''simple docstring'''
__SCREAMING_SNAKE_CASE : Union[str, Any] = scores.at[..., self.suppress_tokens].set(-float('''inf''' ) )
return scores
class SCREAMING_SNAKE_CASE_ ( snake_case_ ):
def __init__( self , lowercase ) -> Optional[Any]:
'''simple docstring'''
__SCREAMING_SNAKE_CASE : List[str] = dict(UpperCAmelCase__ )
# Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the
# index of the array corresponds to the index of the token to be forced, for XLA compatibility.
# Indexes without forced tokens will have a negative value.
__SCREAMING_SNAKE_CASE : Optional[int] = jnp.ones((max(force_token_map.keys() ) + 1) , dtype=jnp.intaa ) * -1
for index, token in force_token_map.items():
if token is not None:
__SCREAMING_SNAKE_CASE : Optional[int] = force_token_array.at[index].set(UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE : Tuple = jnp.intaa(UpperCAmelCase__ )
    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        def _force_token(generation_idx):
            batch_size = scores.shape[0]
            current_token = self.force_token_array[generation_idx]

            new_scores = jnp.ones_like(scores, dtype=scores.dtype) * -float("inf")
            updates = jnp.zeros((batch_size, 1), dtype=scores.dtype)
            new_scores = lax.dynamic_update_slice(new_scores, updates, (0, current_token))
            return new_scores

        scores = lax.cond(
            cur_len >= self.force_token_array.shape[0],
            # If cur_len is beyond the length of force_token_array, the processor does nothing.
            lambda: scores,
            # Otherwise, it may force a certain token.
            lambda: lax.cond(
                self.force_token_array[cur_len] >= 0,
                # Only valid (non-negative) tokens are forced.
                lambda: _force_token(cur_len),
                # Otherwise, the processor does nothing.
                lambda: scores,
            ),
        )
        return scores


class FlaxWhisperTimeStampLogitsProcessor(FlaxLogitsProcessor):
    def __init__(self, generate_config, model_config, decoder_input_length):
        self.eos_token_id = generate_config.eos_token_id
        self.no_timestamps_token_id = generate_config.no_timestamps_token_id
        self.timestamp_begin = generate_config.no_timestamps_token_id + 1
        self.begin_index = decoder_input_length + 1

        if generate_config.is_multilingual:
            # room for language token and task token
            self.begin_index += 2
        if hasattr(generate_config, "max_initial_timestamp_index"):
            self.max_initial_timestamp_index = generate_config.max_initial_timestamp_index
        else:
            self.max_initial_timestamp_index = model_config.vocab_size
        if self.max_initial_timestamp_index is None:
            self.max_initial_timestamp_index = model_config.vocab_size

    def __call__(self, input_ids, scores, cur_len):
        # suppress <|notimestamps|>, which is handled by without_timestamps
        scores = scores.at[:, self.no_timestamps_token_id].set(-float("inf"))

        def handle_pairs(input_ids_k, scores_k):
            last_was_timestamp = jnp.where((cur_len - self.begin_index) >= 1, True, False)
            last_was_timestamp = jnp.where(
                input_ids_k[cur_len - 1] >= self.timestamp_begin,
                True and last_was_timestamp,
                False,
            )

            penultimate_was_timestamp = jnp.where((cur_len - self.begin_index) < 2, True, False)
            penultimate_was_timestamp = jnp.where(
                input_ids_k[cur_len - 2] >= self.timestamp_begin,
                True,
                penultimate_was_timestamp,
            )

            return jnp.where(
                last_was_timestamp,
                jnp.where(
                    penultimate_was_timestamp > 0,
                    scores_k.at[self.timestamp_begin :].set(-float("inf")),  # has to be non-timestamp
                    scores_k.at[: self.eos_token_id].set(-float("inf")),  # cannot be normal text tokens
                ),
                scores_k,
            )

        scores = jax.vmap(handle_pairs)(input_ids, scores)

        apply_max_initial_timestamp = jnp.where(cur_len == self.begin_index, True, False)
        apply_max_initial_timestamp = jnp.where(
            self.max_initial_timestamp_index is not None,
            True and apply_max_initial_timestamp,
            False,
        )

        last_allowed = self.timestamp_begin + self.max_initial_timestamp_index

        scores = jnp.where(
            apply_max_initial_timestamp,
            scores.at[:, last_allowed + 1 :].set(-float("inf")),
            scores,
        )

        # if sum of probability over timestamps is above any other token, sample timestamp
        logprobs = jax.nn.log_softmax(scores, axis=-1)

        def handle_cumulative_probs(logprobs_k, scores_k):
            timestamp_logprob = jax.nn.logsumexp(logprobs_k[self.timestamp_begin :], axis=-1)
            max_text_token_logprob = jnp.max(logprobs_k[: self.timestamp_begin])
            return jnp.where(
                timestamp_logprob > max_text_token_logprob,
                scores_k.at[: self.timestamp_begin].set(-float("inf")),
                scores_k,
            )

        scores = jax.vmap(handle_cumulative_probs)(logprobs, scores)

        return scores
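
# Illustrative sketch (not part of the processors above): the forcing pattern used by
# `_force_token` builds a row of -inf scores and then writes a 0.0 column at the forced
# token id with `lax.dynamic_update_slice`, so that token is the only possible choice.
if __name__ == "__main__":
    import jax.numpy as _jnp
    from jax import lax as _lax

    _batch, _vocab, _forced_id = 2, 5, 3
    _scores = _jnp.zeros((_batch, _vocab))
    _masked = _jnp.ones_like(_scores) * -float("inf")
    _masked = _lax.dynamic_update_slice(_masked, _jnp.zeros((_batch, 1)), (0, _forced_id))
    # every position is -inf except column 3, so argmax/sampling must pick token 3
    assert int(_masked.argmax(axis=-1)[0]) == _forced_id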
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
'RWKV/rwkv-4-169m-pile': 'https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json',
'RWKV/rwkv-4-430m-pile': 'https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json',
'RWKV/rwkv-4-1b5-pile': 'https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json',
'RWKV/rwkv-4-3b-pile': 'https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json',
'RWKV/rwkv-4-7b-pile': 'https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json',
'RWKV/rwkv-4-14b-pile': 'https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json',
'RWKV/rwkv-raven-1b5': 'https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json',
'RWKV/rwkv-raven-3b': 'https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json',
'RWKV/rwkv-raven-7b': 'https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json',
'RWKV/rwkv-raven-14b': 'https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json',
}
class UpperCamelCase ( snake_case_ ):
UpperCamelCase : Any = '''rwkv'''
UpperCamelCase : Optional[int] = {'''max_position_embeddings''': '''context_length'''}
def __init__( self : List[Any] , UpperCAmelCase__ : Optional[int]=50277 , UpperCAmelCase__ : List[Any]=1024 , UpperCAmelCase__ : Dict=4096 , UpperCAmelCase__ : Optional[Any]=32 , UpperCAmelCase__ : Optional[Any]=None , UpperCAmelCase__ : Any=None , UpperCAmelCase__ : int=1E-5 , UpperCAmelCase__ : Tuple=0 , UpperCAmelCase__ : Union[str, Any]=0 , UpperCAmelCase__ : Any=6 , UpperCAmelCase__ : Optional[Any]=False , UpperCAmelCase__ : Optional[int]=True , **UpperCAmelCase__ : List[Any] , ) -> Optional[int]:
_a : Dict = vocab_size
_a : List[Any] = context_length
_a : int = hidden_size
_a : Dict = num_hidden_layers
_a : List[Any] = attention_hidden_size if attention_hidden_size is not None else hidden_size
_a : Any = intermediate_size if intermediate_size is not None else 4 * hidden_size
_a : int = layer_norm_epsilon
_a : List[str] = rescale_every
_a : List[str] = use_cache
_a : List[str] = bos_token_id
_a : Optional[int] = eos_token_id
super().__init__(
tie_word_embeddings=UpperCAmelCase__ , bos_token_id=UpperCAmelCase__ , eos_token_id=UpperCAmelCase__ , **UpperCAmelCase__ )
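
# Usage sketch: `attribute_map` above aliases the standard `max_position_embeddings`
# attribute onto RWKV's `context_length` field, so both names resolve to one value.
if __name__ == "__main__":
    demo_config = RwkvConfig(context_length=2048)
    assert demo_config.max_position_embeddings == 2048  # resolved through attribute_map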
"""simple docstring"""
def __a ( _SCREAMING_SNAKE_CASE = 1000 ) ->int:
a__: List[Any] = 3
a__: Any = 0
while a < n:
if a % 3 == 0 or a % 5 == 0:
result += a
elif a % 15 == 0:
result -= a
a += 1
return result
if __name__ == "__main__":
print(f"{solution() = }")
"""A platform independent file lock that supports the with-statement."""

import logging
import os
import threading
import time


try:
    import warnings
except ImportError:
    warnings = None

try:
    import msvcrt
except ImportError:
    msvcrt = None

try:
    import fcntl
except ImportError:
    fcntl = None


# Backward compatibility
# ------------------------------------------------
try:
    TimeoutError
except NameError:
    TimeoutError = OSError


# Data
# ------------------------------------------------
__all__ = [
    "Timeout",
    "BaseFileLock",
    "WindowsFileLock",
    "UnixFileLock",
    "SoftFileLock",
    "FileLock",
]

__version__ = "3.0.12"


_logger = None


def logger():
    """Returns the logger instance used in this module."""
    global _logger
    _logger = _logger or logging.getLogger(__name__)
    return _logger


class Timeout(TimeoutError):
    """Raised when the lock could not be acquired in *timeout* seconds."""

    def __init__(self, lock_file):
        # The path of the file lock.
        self.lock_file = lock_file
        return None

    def __str__(self):
        temp = f"The file lock '{self.lock_file}' could not be acquired."
        return temp


class _Acquire_ReturnProxy:
    def __init__(self, lock):
        self.lock = lock
        return None

    def __enter__(self):
        return self.lock

    def __exit__(self, exc_type, exc_value, traceback):
        self.lock.release()
        return None


class BaseFileLock:
    """Implements the base class of a file lock."""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        max_filename_length = max_filename_length if max_filename_length is not None else 255
        # Hash the filename if it's too long
        lock_file = self.hash_filename_if_too_long(lock_file, max_filename_length)

        # The path to the lock file.
        self._lock_file = lock_file

        # The file descriptor for the *_lock_file* as it is returned by the
        # os.open() function.
        # This file lock is only NOT None, if the object currently holds the
        # lock.
        self._lock_file_fd = None

        # The default timeout value.
        self.timeout = timeout

        # We use this lock primarily for the lock counter.
        self._thread_lock = threading.Lock()

        # The lock counter is used for implementing the nested locking
        # mechanism. Whenever the lock is acquired, the counter is increased and
        # the lock is only released, when this value is 0 again.
        self._lock_counter = 0
        return None

    @property
    def lock_file(self):
        return self._lock_file

    @property
    def timeout(self):
        return self._timeout

    @timeout.setter
    def timeout(self, value):
        self._timeout = float(value)
        return None

    # Platform dependent locking
    # --------------------------------------------
    def _acquire(self):
        raise NotImplementedError()

    def _release(self):
        raise NotImplementedError()

    # Platform independent methods
    # --------------------------------------------
    @property
    def is_locked(self):
        return self._lock_file_fd is not None

    def acquire(self, timeout=None, poll_intervall=0.05):
        # Use the default timeout, if no timeout is provided.
        if timeout is None:
            timeout = self.timeout

        # Increment the number right at the beginning.
        # We can still undo it, if something fails.
        with self._thread_lock:
            self._lock_counter += 1

        lock_id = id(self)
        lock_filename = self._lock_file
        start_time = time.time()
        try:
            while True:
                with self._thread_lock:
                    if not self.is_locked:
                        logger().debug(f"Attempting to acquire lock {lock_id} on {lock_filename}")
                        self._acquire()

                if self.is_locked:
                    logger().debug(f"Lock {lock_id} acquired on {lock_filename}")
                    break
                elif timeout >= 0 and time.time() - start_time > timeout:
                    logger().debug(f"Timeout on acquiring lock {lock_id} on {lock_filename}")
                    raise Timeout(self._lock_file)
                else:
                    logger().debug(
                        f"Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ..."
                    )
                    time.sleep(poll_intervall)
        except:  # noqa
            # Something did go wrong, so decrement the counter.
            with self._thread_lock:
                self._lock_counter = max(0, self._lock_counter - 1)
            raise
        return _Acquire_ReturnProxy(lock=self)

    def release(self, force=False):
        with self._thread_lock:
            if self.is_locked:
                self._lock_counter -= 1

                if self._lock_counter == 0 or force:
                    lock_id = id(self)
                    lock_filename = self._lock_file

                    logger().debug(f"Attempting to release lock {lock_id} on {lock_filename}")
                    self._release()
                    self._lock_counter = 0
                    logger().debug(f"Lock {lock_id} released on {lock_filename}")
        return None

    def __enter__(self):
        self.acquire()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.release()
        return None

    def __del__(self):
        self.release(force=True)
        return None

    def hash_filename_if_too_long(self, path, max_length):
        filename = os.path.basename(path)
        if len(filename) > max_length and max_length > 0:
            dirname = os.path.dirname(path)
            hashed_filename = str(hash(filename))
            new_filename = filename[: max_length - len(hashed_filename) - 8] + "..." + hashed_filename + ".lock"
            return os.path.join(dirname, new_filename)
        else:
            return path


# Windows locking mechanism
# ~~~~~~~~~~~~~~~~~~~~~~~~~
class WindowsFileLock(BaseFileLock):
    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        from .file_utils import relative_to_absolute_path

        super().__init__(lock_file, timeout=timeout, max_filename_length=max_filename_length)
        self._lock_file = "\\\\?\\" + relative_to_absolute_path(self.lock_file)

    def _acquire(self):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        try:
            fd = os.open(self._lock_file, open_mode)
        except OSError:
            pass
        else:
            try:
                msvcrt.locking(fd, msvcrt.LK_NBLCK, 1)
            except OSError:
                os.close(fd)
            else:
                self._lock_file_fd = fd
        return None

    def _release(self):
        fd = self._lock_file_fd
        self._lock_file_fd = None
        msvcrt.locking(fd, msvcrt.LK_UNLCK, 1)
        os.close(fd)

        try:
            os.remove(self._lock_file)
        # Probably another instance of the application
        # that acquired the file lock.
        except OSError:
            pass
        return None


# Unix locking mechanism
# ~~~~~~~~~~~~~~~~~~~~~~
class UnixFileLock(BaseFileLock):
    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        max_filename_length = os.statvfs(os.path.dirname(lock_file)).f_namemax
        super().__init__(lock_file, timeout=timeout, max_filename_length=max_filename_length)

    def _acquire(self):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        fd = os.open(self._lock_file, open_mode)
        try:
            fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
        except OSError:
            os.close(fd)
        else:
            self._lock_file_fd = fd
        return None

    def _release(self):
        fd = self._lock_file_fd
        self._lock_file_fd = None
        fcntl.flock(fd, fcntl.LOCK_UN)
        os.close(fd)
        return None


# Soft lock
# ~~~~~~~~~
class SoftFileLock(BaseFileLock):
    def _acquire(self):
        open_mode = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC
        try:
            fd = os.open(self._lock_file, open_mode)
        except OSError:
            pass
        else:
            self._lock_file_fd = fd
        return None

    def _release(self):
        os.close(self._lock_file_fd)
        self._lock_file_fd = None

        try:
            os.remove(self._lock_file)
        # The file is already deleted and that's what we want.
        except OSError:
            pass
        return None


FileLock = None
if msvcrt:
    FileLock = WindowsFileLock
elif fcntl:
    FileLock = UnixFileLock
else:
    FileLock = SoftFileLock

    if warnings is not None:
        warnings.warn("only soft file lock is available")
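
# Usage sketch: `FileLock` resolves to the platform-appropriate class above, and the
# context-manager protocol releases the lock even if the body raises. (On Windows the
# relative `file_utils` import above requires running inside the parent package.)
if __name__ == "__main__":
    demo_lock = FileLock("demo.txt.lock", timeout=1)
    with demo_lock:
        pass  # lock held here; a second process would wait up to 1 second, then raise Timeout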
"""Prefix function (the Knuth-Morris-Pratt failure function)."""


def prefix_function(input_string: str) -> list:
    """
    For each position, compute the length of the longest proper prefix of the
    string that is also a suffix ending at that position.
    """
    prefix_result = [0] * len(input_string)
    for i in range(1, len(input_string)):
        # use last results for better performance - dynamic programming
        j = prefix_result[i - 1]
        while j > 0 and input_string[i] != input_string[j]:
            j = prefix_result[j - 1]
        if input_string[i] == input_string[j]:
            j += 1
        prefix_result[i] = j
    return prefix_result


def longest_prefix(input_string: str) -> int:
    """Return the longest prefix-suffix length found anywhere in the string."""
    return max(prefix_function(input_string))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
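
# Worked example (illustrative): for "aabcdaabc" the prefix function is
# [0, 1, 0, 0, 0, 1, 2, 3, 4]; the trailing 4 means the 4-character prefix "aabc"
# also ends the string, which is exactly what longest_prefix() reports.
if __name__ == "__main__":
    assert prefix_function("aabcdaabc") == [0, 1, 0, 0, 0, 1, 2, 3, 4]
    assert longest_prefix("aabcdaabc") == 4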
"""Convert a T5X checkpoint into a PyTorch checkpoint."""

import argparse
import collections

import numpy as np
import torch
from flax import traverse_util
from t5x import checkpoints

from transformers import MT5Config, UMT5EncoderModel, UMT5ForConditionalGeneration
from transformers.utils import logging


logging.set_verbosity_info()


def t5x_relpos_bias_lookup(params, i, prefix):
    """Returns the Relative Position Bias parameters of a layer. Does not transpose."""
    return params[f"{prefix}/{prefix}/relpos_bias/rel_embedding"][:, i, :]


def t5x_attention_lookup(params, i, prefix, layer_name="attention"):
    """Returns the KOQV parameters of (self-)attention. Does not transpose."""
    k_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/key/kernel"][:, i, :, :])
    k = k_tmp.reshape(k_tmp.shape[0], k_tmp.shape[1] * k_tmp.shape[2])
    o_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/out/kernel"][:, i, :, :])
    o = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1], o_tmp.shape[2])
    q_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/query/kernel"][:, i, :, :])
    q = q_tmp.reshape(q_tmp.shape[0], q_tmp.shape[1] * q_tmp.shape[2])
    v_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/value/kernel"][:, i, :, :])
    v = v_tmp.reshape(v_tmp.shape[0], v_tmp.shape[1] * v_tmp.shape[2])
    return k, o, q, v


def t5x_mlp_lookup(params, i, prefix, split_mlp_wi=False):
    """Returns the MLP parameters of a layer. Does not transpose."""
    if split_mlp_wi:
        wi_0 = params[f"{prefix}/{prefix}/mlp/wi_0/kernel"][:, i, :]
        wi_1 = params[f"{prefix}/{prefix}/mlp/wi_1/kernel"][:, i, :]
        wi = (wi_0, wi_1)
    else:
        wi = params[f"{prefix}/{prefix}/mlp/wi/kernel"][:, i, :]

    wo = params[f"{prefix}/{prefix}/mlp/wo/kernel"][:, i, :]
    return wi, wo


def t5x_layer_norm_lookup(params, i, prefix, layer_name):
    """Returns the layer norm param of a layer."""
    return params[f"{prefix}/{prefix}/{layer_name}/scale"][:, i]


def convert_t5x_to_pytorch(variables, *, num_layers, is_encoder_only, scalable_attention=False):
    """Converts the parameters from T5X-Flax to Transformers-PyTorch."""
    old = traverse_util.flatten_dict(variables["target"])
    old = {"/".join(k): v for k, v in old.items()}

    # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
    split_mlp_wi = "encoder/encoder/mlp/wi_0/kernel" in old
    print("Split MLP:", split_mlp_wi)

    new = collections.OrderedDict()

    # Shared embeddings.
    new["shared.weight"] = old["token_embedder/embedding"]

    # Encoder.
    for i in range(num_layers):
        # Block i, layer 0 (Self Attention).
        layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_attention_layer_norm")
        k, o, q, v = t5x_attention_lookup(old, i, "encoder", "attention")
        new[f"encoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
        new[f"encoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

        # Block i, layer 1 (MLP).
        layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_mlp_layer_norm")
        wi, wo = t5x_mlp_lookup(old, i, "encoder", split_mlp_wi)
        new[f"encoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
        if split_mlp_wi:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_0.weight"] = wi[0].T
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_1.weight"] = wi[1].T
        else:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi.weight"] = wi.T
        new[f"encoder.block.{i}.layer.1.DenseReluDense.wo.weight"] = wo.T

        if scalable_attention:
            # convert the rel_embedding of each layer
            new[f"encoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
                old, i, "encoder"
            ).T

    new["encoder.final_layer_norm.weight"] = old["encoder/encoder_norm/scale"]

    if not scalable_attention:
        new["encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
            old, 0, "encoder"
        ).T
        new["decoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
            old, 0, "decoder"
        ).T

    if not is_encoder_only:
        # Decoder.
        for i in range(num_layers):
            # Block i, layer 0 (Self Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_self_attention_layer_norm")
            k, o, q, v = t5x_attention_lookup(old, i, "decoder", "self_attention")
            new[f"decoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

            # Block i, layer 1 (Cross Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_cross_attention_layer_norm")
            k, o, q, v = t5x_attention_lookup(old, i, "decoder", "encoder_decoder_attention")
            new[f"decoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.1.EncDecAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.v.weight"] = v.T

            # Block i, layer 2 (MLP).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_mlp_layer_norm")
            wi, wo = t5x_mlp_lookup(old, i, "decoder", split_mlp_wi)
            new[f"decoder.block.{i}.layer.2.layer_norm.weight"] = layer_norm
            if split_mlp_wi:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_0.weight"] = wi[0].T
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_1.weight"] = wi[1].T
            else:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi.weight"] = wi.T
            new[f"decoder.block.{i}.layer.2.DenseReluDense.wo.weight"] = wo.T

            if scalable_attention:
                # convert the rel_embedding of each layer
                new[f"decoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight"] = (
                    t5x_relpos_bias_lookup(old, i, "decoder").T
                )

        new["decoder.final_layer_norm.weight"] = old["decoder/decoder_norm/scale"]

        # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
        if "decoder/logits_dense/kernel" in old:
            new["lm_head.weight"] = old["decoder/logits_dense/kernel"].T

    return new


def make_state_dict(converted_params, is_encoder_only):
    """Prepares a state dict for the PyTorch model."""
    # Make a state dict with torch tensors.
    state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy())) for (k, v) in converted_params.items()])

    # Add what is missing.
    if "encoder.embed_tokens.weight" not in state_dict:
        state_dict["encoder.embed_tokens.weight"] = state_dict["shared.weight"]

    if not is_encoder_only:
        if "decoder.embed_tokens.weight" not in state_dict:
            state_dict["decoder.embed_tokens.weight"] = state_dict["shared.weight"]

        if "lm_head.weight" not in state_dict:  # For old 1.0 models.
            print("Using shared word embeddings as lm_head.")
            state_dict["lm_head.weight"] = state_dict["shared.weight"]

    return state_dict


def load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only, scalable_attention):
    """Replaces the params in model with the T5X converted params."""
    variables = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
    converted = convert_t5x_to_pytorch(
        variables, num_layers=config.num_layers, is_encoder_only=is_encoder_only, scalable_attention=scalable_attention
    )
    state_dict = make_state_dict(converted, is_encoder_only)
    model.load_state_dict(state_dict, strict=True)


def convert_t5x_checkpoint_to_pytorch(
    t5x_checkpoint_path, config_file, pytorch_dump_path, is_encoder_only=False, scalable_attention=False
):
    """Loads the config and model, converts the T5X checkpoint, and saves a PyTorch checkpoint."""
    # Initialise PyTorch model
    config = MT5Config.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = UMT5EncoderModel(config)
    else:
        model = UMT5ForConditionalGeneration(config)

    # Load weights from tf checkpoint
    load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only, scalable_attention)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)

    # Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path)
    print("Done")


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Converts a native T5X checkpoint into a PyTorch checkpoint.")
    # Required parameters
    parser.add_argument(
        "--t5x_checkpoint_path", default=None, type=str, required=True, help="Path to the T5X checkpoint."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.",
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--is_encoder_only", action="store_true", help="Check if the model is encoder-decoder model", default=False
    )
    parser.add_argument(
        "--scalable_attention",
        action="store_true",
        help="Whether the model uses scaled attention (umt5 model)",
        default=False,
    )
    args = parser.parse_args()
    convert_t5x_checkpoint_to_pytorch(
        args.t5x_checkpoint_path,
        args.config_file,
        args.pytorch_dump_path,
        args.is_encoder_only,
        args.scalable_attention,
    )
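
# Example invocation (a sketch; every path below is hypothetical, and the file name
# assumes the script is saved as convert_t5x_checkpoint_to_pytorch.py):
#   python convert_t5x_checkpoint_to_pytorch.py \
#       --t5x_checkpoint_path /path/to/t5x/checkpoint_1000000 \
#       --config_file /path/to/config.json \
#       --pytorch_dump_path /path/to/output \
#       --scalable_attention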
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to the check the quality of the outcomes.
#
# This version creates a tiny model through reduction of a normal pre-trained model, but keeping the
# full vocab, merges file, and thus also resulting in a larger model due to a large vocab size.
# This gives ~3MB in total for all files.
#
# If you want one 50 times smaller than this, see `fsmt-make-super-tiny-model.py`, which is slightly more complicated
#
#
# It will be used then as "stas/tiny-wmt19-en-de"
# Build
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
mname = "facebook/wmt19-en-de"

tokenizer = FSMTTokenizer.from_pretrained(mname)
# get the correct vocab sizes, etc. from the master model
config = FSMTConfig.from_pretrained(mname)
config.update(
    dict(
        d_model=4,
        encoder_layers=1,
        decoder_layers=1,
        encoder_ffn_dim=4,
        decoder_ffn_dim=4,
        encoder_attention_heads=1,
        decoder_attention_heads=1,
    )
)

tiny_model = FSMTForConditionalGeneration(config)
print(f"num of params {tiny_model.num_parameters()}")

# Test
batch = tokenizer(["Making tiny model"], return_tensors="pt")
outputs = tiny_model(**batch)

print("test output:", len(outputs.logits[0]))

# Save
mname_tiny = "tiny-wmt19-en-de"
tiny_model.half()  # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)

print(f"Generated {mname_tiny}")

# Upload
# transformers-cli upload tiny-wmt19-en-de
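
# Consumer-side sketch: once uploaded under the id mentioned above, the tiny checkpoint
# loads like any hub model and is handy for fast smoke tests (requires network access,
# so it is left commented out here):
# tiny = FSMTForConditionalGeneration.from_pretrained("stas/tiny-wmt19-en-de")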
from argparse import ArgumentParser, Namespace

from ..utils import logging
from . import BaseTransformersCLICommand


def convert_command_factory(args: Namespace):
    """
    Factory function used to convert a model TF 1.0 checkpoint in a PyTorch checkpoint.

    Returns: ConvertCommand
    """
    return ConvertCommand(
        args.model_type, args.tf_checkpoint, args.pytorch_dump_output, args.config, args.finetuning_task_name
    )


IMPORT_ERROR_MESSAGE = """
transformers can only be used from the commandline to convert TensorFlow models in PyTorch, In that case, it requires
TensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.
"""


class ConvertCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        """
        Register this command to argparse so it's available for the transformers-cli.

        Args:
            parser: Root parser to register command-specific arguments
        """
        train_parser = parser.add_parser(
            "convert",
            help="CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints.",
        )
        train_parser.add_argument("--model_type", type=str, required=True, help="Model's type.")
        train_parser.add_argument(
            "--tf_checkpoint", type=str, required=True, help="TensorFlow checkpoint path or folder."
        )
        train_parser.add_argument(
            "--pytorch_dump_output", type=str, required=True, help="Path to the PyTorch saved model output."
        )
        train_parser.add_argument("--config", type=str, default="", help="Configuration file path or folder.")
        train_parser.add_argument(
            "--finetuning_task_name",
            type=str,
            default=None,
            help="Optional fine-tuning task name if the TF model was a finetuned model.",
        )
        train_parser.set_defaults(func=convert_command_factory)

    def __init__(
        self,
        model_type: str,
        tf_checkpoint: str,
        pytorch_dump_output: str,
        config: str,
        finetuning_task_name: str,
        *args,
    ):
        self._logger = logging.get_logger("transformers-cli/converting")

        self._logger.info(f"Loading model {model_type}")
        self._model_type = model_type
        self._tf_checkpoint = tf_checkpoint
        self._pytorch_dump_output = pytorch_dump_output
        self._config = config
        self._finetuning_task_name = finetuning_task_name

    def run(self):
        if self._model_type == "albert":
            try:
                from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import (
                    convert_tf_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "bert":
            try:
                from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import (
                    convert_tf_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "funnel":
            try:
                from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import (
                    convert_tf_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "t5":
            try:
                from ..models.t5.convert_t5_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "gpt":
            from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import (
                convert_openai_checkpoint_to_pytorch,
            )

            convert_openai_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "transfo_xl":
            try:
                from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import (
                    convert_transfo_xl_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            if "ckpt" in self._tf_checkpoint.lower():
                TF_CHECKPOINT = self._tf_checkpoint
                TF_DATASET_FILE = ""
            else:
                TF_DATASET_FILE = self._tf_checkpoint
                TF_CHECKPOINT = ""
            convert_transfo_xl_checkpoint_to_pytorch(
                TF_CHECKPOINT, self._config, self._pytorch_dump_output, TF_DATASET_FILE
            )
        elif self._model_type == "gpt2":
            try:
                from ..models.gpt2.convert_gpt2_original_tf_checkpoint_to_pytorch import (
                    convert_gpt2_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_gpt2_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "xlnet":
            try:
                from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import (
                    convert_xlnet_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_xlnet_checkpoint_to_pytorch(
                self._tf_checkpoint, self._config, self._pytorch_dump_output, self._finetuning_task_name
            )
        elif self._model_type == "xlm":
            from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import (
                convert_xlm_checkpoint_to_pytorch,
            )

            convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint, self._pytorch_dump_output)
        elif self._model_type == "lxmert":
            from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import (
                convert_lxmert_checkpoint_to_pytorch,
            )

            convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint, self._pytorch_dump_output)
        elif self._model_type == "rembert":
            from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import (
                convert_rembert_tf_checkpoint_to_pytorch,
            )

            convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        else:
            raise ValueError(
                "--model_type should be selected in the list [bert, gpt, gpt2, t5, transfo_xl, xlnet, xlm, lxmert]"
            )
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class TextClassificationTool(PipelineTool):
    default_checkpoint = "facebook/bart-large-mnli"
    description = (
        "This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which "
        "should be the text to classify, and `labels`, which should be the list of labels to use for classification. "
        "It returns the most likely label in the list of provided `labels` for the input text."
    )
    name = "text_classifier"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSequenceClassification

    inputs = ["text", ["text"]]
    outputs = ["text"]

    def setup(self):
        super().setup()
        config = self.model.config
        self.entailment_id = -1
        for idx, label in config.id2label.items():
            if label.lower().startswith("entail"):
                self.entailment_id = int(idx)
        if self.entailment_id == -1:
            raise ValueError("Could not determine the entailment ID from the model config, please pass it at init.")

    def encode(self, text, labels):
        self._labels = labels
        return self.pre_processor(
            [text] * len(labels),
            [f"This example is {label}" for label in labels],
            return_tensors="pt",
            padding="max_length",
        )

    def decode(self, outputs):
        logits = outputs.logits
        label_id = torch.argmax(logits[:, 2]).item()
        return self._labels[label_id]
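
# Usage sketch (an assumption: the tool's __call__ forwards its arguments straight to
# `encode`, so labels are passed as a keyword; downloading the checkpoint needs network):
if __name__ == "__main__":
    classifier = TextClassificationTool()
    print(classifier("This is a super nice API!", labels=["positive", "negative"]))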
"""simple docstring"""
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_A = {
"""configuration_cpmant""": ["""CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """CpmAntConfig"""],
"""tokenization_cpmant""": ["""CpmAntTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = [
"""CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""CpmAntForCausalLM""",
"""CpmAntModel""",
"""CpmAntPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
from .tokenization_cpmant import CpmAntTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_cpmant import (
CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
CpmAntForCausalLM,
CpmAntModel,
CpmAntPreTrainedModel,
)
else:
import sys
_A = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
import unittest

import numpy as np
import requests

from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

    from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
    is_torch_greater_or_equal_than_1_11 = False

if is_vision_available():
    from PIL import Image

    from transformers import Pix2StructImageProcessor


class Pix2StructImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        size=None,
        do_normalize=True,
        do_convert_rgb=True,
        patch_size=None,
    ):
        size = size if size is not None else {"height": 20, "width": 20}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.size = size
        self.do_normalize = do_normalize
        self.do_convert_rgb = do_convert_rgb
        self.max_patches = [512, 1024, 2048, 4096]
        self.patch_size = patch_size if patch_size is not None else {"height": 16, "width": 16}

    def prepare_image_processor_dict(self):
        return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb}

    def prepare_dummy_image(self):
        img_url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg"
        raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")
        return raw_image


@unittest.skipIf(
    not is_torch_greater_or_equal_than_1_11,
    reason="`Pix2StructImageProcessor` requires `torch>=1.11.0`.",
)
@require_torch
@require_vision
class Pix2StructImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = Pix2StructImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = Pix2StructImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_convert_rgb"))

    def test_expected_patches(self):
        dummy_image = self.image_processor_tester.prepare_dummy_image()

        image_processor = self.image_processing_class(**self.image_processor_dict)
        max_patch = 2048

        inputs = image_processor(dummy_image, return_tensors="pt", max_patches=max_patch)
        self.assertTrue(torch.allclose(inputs.flattened_patches.mean(), torch.tensor(0.0606), atol=1e-3, rtol=1e-3))

    def test_call_pil(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (1, max_patch, expected_hidden_dim),
            )

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )

    def test_call_vqa(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        image_processor.is_vqa = True
        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            with self.assertRaises(ValueError):
                encoded_images = image_processor(
                    image_inputs[0], return_tensors="pt", max_patches=max_patch
                ).flattened_patches

            dummy_text = "Hello"

            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch, header_text=dummy_text
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (1, max_patch, expected_hidden_dim),
            )

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch, header_text=dummy_text
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )

    def test_call_numpy(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (1, max_patch, expected_hidden_dim),
            )

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )

    def test_call_pytorch(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (1, max_patch, expected_hidden_dim),
            )

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )


@unittest.skipIf(
    not is_torch_greater_or_equal_than_1_11,
    reason="`Pix2StructImageProcessor` requires `torch>=1.11.0`.",
)
@require_torch
@require_vision
class Pix2StructImageProcessingTestFourChannels(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = Pix2StructImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = Pix2StructImageProcessingTester(self, num_channels=4)
        self.expected_encoded_image_num_channels = 3

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_convert_rgb"))

    def test_call_pil(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input (the extra channel is dropped after RGB conversion)
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * (self.image_processor_tester.num_channels - 1)
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (1, max_patch, expected_hidden_dim),
            )

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

VISUAL_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "uclanlp/visualbert-vqa": "https://huggingface.co/uclanlp/visualbert-vqa/resolve/main/config.json",
    "uclanlp/visualbert-vqa-pre": "https://huggingface.co/uclanlp/visualbert-vqa-pre/resolve/main/config.json",
    "uclanlp/visualbert-vqa-coco-pre": (
        "https://huggingface.co/uclanlp/visualbert-vqa-coco-pre/resolve/main/config.json"
    ),
    "uclanlp/visualbert-vcr": "https://huggingface.co/uclanlp/visualbert-vcr/resolve/main/config.json",
    "uclanlp/visualbert-vcr-pre": "https://huggingface.co/uclanlp/visualbert-vcr-pre/resolve/main/config.json",
    "uclanlp/visualbert-vcr-coco-pre": (
        "https://huggingface.co/uclanlp/visualbert-vcr-coco-pre/resolve/main/config.json"
    ),
    "uclanlp/visualbert-nlvr2": "https://huggingface.co/uclanlp/visualbert-nlvr2/resolve/main/config.json",
    "uclanlp/visualbert-nlvr2-pre": "https://huggingface.co/uclanlp/visualbert-nlvr2-pre/resolve/main/config.json",
    "uclanlp/visualbert-nlvr2-coco-pre": (
        "https://huggingface.co/uclanlp/visualbert-nlvr2-coco-pre/resolve/main/config.json"
    ),
    # See all VisualBERT models at https://huggingface.co/models?filter=visual_bert
}


class VisualBertConfig(PretrainedConfig):
    model_type = "visual_bert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        visual_embedding_dim=512,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        bypass_transformer=False,
        special_visual_initialize=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.visual_embedding_dim = visual_embedding_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.bypass_transformer = bypass_transformer
        self.special_visual_initialize = special_visual_initialize
"""simple docstring"""
def _snake_case ( snake_case__ : int ):
if divisor % 5 == 0 or divisor % 2 == 0:
return 0
A = 1
A = 1
while repunit:
A = (10 * repunit + 1) % divisor
repunit_index += 1
return repunit_index
def _snake_case ( snake_case__ : int = 100_0000 ):
A = limit - 1
if divisor % 2 == 0:
divisor += 1
while least_divisible_repunit(snake_case__ ) <= limit:
divisor += 2
return divisor
if __name__ == "__main__":
print(F"""{solution() = }""") | 91 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


_import_structure = {
    "configuration_mask2former": [
        "MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Mask2FormerConfig",
    ],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_mask2former"] = ["Mask2FormerImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mask2former"] = [
        "MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Mask2FormerForUniversalSegmentation",
        "Mask2FormerModel",
        "Mask2FormerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_mask2former import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, Mask2FormerConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_mask2former import Mask2FormerImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mask2former import (
            MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            Mask2FormerForUniversalSegmentation,
            Mask2FormerModel,
            Mask2FormerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
from __future__ import annotations
def encode(plain: str) -> list[int]:
    """Encode a lowercase string as 1-based alphabet positions."""
    return [ord(elem) - 96 for elem in plain]


def decode(encoded: list[int]) -> str:
    """Decode a list of 1-based alphabet positions back into a string."""
    return "".join(chr(elem + 96) for elem in encoded)


def main() -> None:
    encoded = encode(input("-> ").strip().lower())
    print("Encoded: ", encoded)
    print("Decoded:", decode(encoded))


if __name__ == "__main__":
    main()
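
# Worked example (illustrative): letters map to their 1-based alphabet positions, so
# encode("hello") -> [8, 5, 12, 12, 15] and decode([8, 5, 12, 12, 15]) -> "hello".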
import importlib.util
import json
import os
import warnings
from dataclasses import dataclass, field

import torch

from ..training_args import TrainingArguments
from ..utils import cached_property, is_sagemaker_dp_enabled, logging


logger = logging.get_logger(__name__)


def is_sagemaker_model_parallel_available():
    # Get the sagemaker specific mp parameters from smp_options variable.
    smp_options = os.getenv("SM_HP_MP_PARAMETERS", "{}")
    try:
        # Parse it and check the field "partitions" is included, it is required for model parallel.
        smp_options = json.loads(smp_options)
        if "partitions" not in smp_options:
            return False
    except json.JSONDecodeError:
        return False

    # Get the sagemaker specific framework parameters from mpi_options variable.
    mpi_options = os.getenv("SM_FRAMEWORK_PARAMS", "{}")
    try:
        # Parse it and check the field "sagemaker_distributed_dataparallel_enabled".
        mpi_options = json.loads(mpi_options)
        if not mpi_options.get("sagemaker_mpi_enabled", False):
            return False
    except json.JSONDecodeError:
        return False
    # Lastly, check if the `smdistributed` module is present.
    return importlib.util.find_spec("smdistributed") is not None


if is_sagemaker_model_parallel_available():
    import smdistributed.modelparallel.torch as smp

    smp.init()


@dataclass
class SageMakerTrainingArguments(TrainingArguments):
    mp_parameters: str = field(
        default="",
        metadata={"help": "Used by the SageMaker launcher to send mp-specific args. Ignored in SageMakerTrainer"},
    )

    def __post_init__(self):
        super().__post_init__()
        warnings.warn(
            "`SageMakerTrainingArguments` is deprecated and will be removed in v5 of Transformers. You can use "
            "`TrainingArguments` instead.",
            FutureWarning,
        )

    @cached_property
    def _setup_devices(self) -> "torch.device":
        logger.info("PyTorch: setting up devices")
        if torch.distributed.is_available() and torch.distributed.is_initialized() and self.local_rank == -1:
            logger.warning(
                "torch.distributed process group is initialized, but local_rank == -1. "
                "In order to use Torch DDP, launch your script with `python -m torch.distributed.launch"
            )
        if self.no_cuda:
            device = torch.device("cpu")
            self._n_gpu = 0
        elif is_sagemaker_model_parallel_available():
            local_rank = smp.local_rank()
            device = torch.device("cuda", local_rank)
            self._n_gpu = 1
        elif is_sagemaker_dp_enabled():
            import smdistributed.dataparallel.torch.torch_smddp  # noqa: F401

            torch.distributed.init_process_group(backend="smddp", timeout=self.ddp_timeout_delta)
            self.local_rank = int(os.getenv("SMDATAPARALLEL_LOCAL_RANK"))
            device = torch.device("cuda", self.local_rank)
            self._n_gpu = 1
        elif self.local_rank == -1:
            # if n_gpu is > 1 we'll use nn.DataParallel.
            # If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0`
            # Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will
            # trigger an error that a device index is missing. Index 0 takes into account the
            # GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0`
            # will use the first GPU in that env, i.e. GPU#1
            device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
            # Sometimes the line in the postinit has not been run before we end up here, so just checking we're not at
            # the default value.
            self._n_gpu = torch.cuda.device_count()
        else:
            # Here, we'll use torch.distributed.
            # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
            if not torch.distributed.is_initialized():
                torch.distributed.init_process_group(backend="nccl", timeout=self.ddp_timeout_delta)
            device = torch.device("cuda", self.local_rank)
            self._n_gpu = 1

        if device.type == "cuda":
            torch.cuda.set_device(device)

        return device

    @property
    def world_size(self):
        if is_sagemaker_model_parallel_available():
            return smp.dp_size()

        return super().world_size

    @property
    def place_model_on_device(self):
        return not is_sagemaker_model_parallel_available()

    @property
    def _no_sync_in_gradient_accumulation(self):
        return False
from __future__ import annotations

from collections.abc import Callable


class Heap:
    """
    A generic Heap class, can be used as min or max by passing the key function
    accordingly.
    """

    def __init__(self, key: Callable | None = None) -> None:
        # Stores actual heap items.
        self.arr: list = []
        # Stores indexes of each item for supporting updates and deletion.
        self.pos_map: dict = {}
        # Stores current size of heap.
        self.size = 0
        # Stores function used to evaluate the score of an item on which basis ordering
        # will be done.
        self.key: Callable = key or (lambda x: x)

    def _parent(self, i: int) -> int | None:
        """Returns parent index of given index if it exists, else None."""
        return int((i - 1) / 2) if i > 0 else None

    def _left(self, i: int) -> int | None:
        """Returns left-child index of given index if it exists, else None."""
        left = int(2 * i + 1)
        return left if 0 < left < self.size else None

    def _right(self, i: int) -> int | None:
        """Returns right-child index of given index if it exists, else None."""
        right = int(2 * i + 2)
        return right if 0 < right < self.size else None

    def _swap(self, i: int, j: int) -> None:
        """Performs changes required for swapping two elements in the heap."""
        # First update the indexes of the items in the index map.
        self.pos_map[self.arr[i][0]], self.pos_map[self.arr[j][0]] = (
            self.pos_map[self.arr[j][0]],
            self.pos_map[self.arr[i][0]],
        )
        # Then swap the items in the list.
        self.arr[i], self.arr[j] = self.arr[j], self.arr[i]

    def _cmp(self, i: int, j: int) -> bool:
        """Compares the two items using default comparison."""
        return self.arr[i][1] < self.arr[j][1]

    def _get_valid_parent(self, i: int) -> int:
        """
        Returns index of valid parent as per desired ordering among given index and
        both its children.
        """
        left = self._left(i)
        right = self._right(i)
        valid_parent = i

        if left is not None and not self._cmp(left, valid_parent):
            valid_parent = left
        if right is not None and not self._cmp(right, valid_parent):
            valid_parent = right

        return valid_parent

    def _heapify_up(self, index: int) -> None:
        """Fixes the heap in the upward direction of the given index."""
        parent = self._parent(index)
        while parent is not None and not self._cmp(index, parent):
            self._swap(index, parent)
            index, parent = parent, self._parent(parent)

    def _heapify_down(self, index: int) -> None:
        """Fixes the heap in the downward direction of the given index."""
        valid_parent = self._get_valid_parent(index)
        while valid_parent != index:
            self._swap(index, valid_parent)
            index, valid_parent = valid_parent, self._get_valid_parent(valid_parent)

    def update_item(self, item: int, item_value: int) -> None:
        """Updates the given item's value in the heap if present."""
        if item not in self.pos_map:
            return
        index = self.pos_map[item]
        self.arr[index] = [item, self.key(item_value)]
        # Make sure heap is right in both up and down direction.
        # Ideally only one of them will make any change.
        self._heapify_up(index)
        self._heapify_down(index)

    def delete_item(self, item: int) -> None:
        """Deletes the given item from the heap if present."""
        if item not in self.pos_map:
            return
        index = self.pos_map[item]
        del self.pos_map[item]
        self.arr[index] = self.arr[self.size - 1]
        self.pos_map[self.arr[self.size - 1][0]] = index
        self.size -= 1
        # Make sure heap is right in both up and down direction. Ideally only one
        # of them will make any change- so no performance loss in calling both.
        if self.size > index:
            self._heapify_up(index)
            self._heapify_down(index)

    def insert_item(self, item: int, item_value: int) -> None:
        """Inserts the given item with the given value into the heap."""
        arr_len = len(self.arr)
        if arr_len == self.size:
            self.arr.append([item, self.key(item_value)])
        else:
            self.arr[self.size] = [item, self.key(item_value)]
        self.pos_map[item] = self.size
        self.size += 1
        self._heapify_up(self.size - 1)

    def get_top(self) -> list | None:
        """Returns the top item [item, calculated value] from the heap if present."""
        return self.arr[0] if self.size else None

    def extract_top(self) -> list | None:
        """
        Returns the top item [item, calculated value] from the heap and removes it,
        if present.
        """
        top_item_tuple = self.get_top()
        if top_item_tuple:
            self.delete_item(top_item_tuple[0])
        return top_item_tuple


def test_heap() -> None:
    """Placeholder for the doctest suite exercised via ``doctest.testmod()`` below."""


if __name__ == "__main__":
    import doctest

    doctest.testmod()
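
# Usage sketch: the default key yields a max-heap (larger values bubble to the top);
# negate the values via `key` to get min-heap behaviour instead.
if __name__ == "__main__":
    heap = Heap()
    for item in (5, 2, 8):
        heap.insert_item(item, item)
    assert heap.get_top() == [8, 8]
    assert heap.extract_top() == [8, 8]
    assert heap.get_top() == [5, 5]

    min_heap = Heap(key=lambda x: -x)
    for item in (5, 2, 8):
        min_heap.insert_item(item, item)
    assert min_heap.get_top()[0] == 2  # smallest item now sits on top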
import os
import unittest

from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer
from transformers.testing_utils import require_jieba, tooslow

from ...test_tokenization_common import TokenizerTesterMixin


@require_jieba
class CPMAntTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CpmAntTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "<d>",
            "</d>",
            "<s>",
            "</s>",
            "</_>",
            "<unk>",
            "<pad>",
            "</n>",
            "我",
            "是",
            "C",
            "P",
            "M",
            "A",
            "n",
            "t",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    @tooslow
    def test_pre_tokenization(self):
        tokenizer = CpmAntTokenizer.from_pretrained("openbmb/cpm-ant-10b")
        texts = "今天天气真好!"
        jieba_tokens = ["今天", "天气", "真", "好", "!"]
        tokens = tokenizer.tokenize(texts)
        self.assertListEqual(tokens, jieba_tokens)

        normalized_text = "今天天气真好!"
        input_tokens = [tokenizer.bos_token] + tokens

        input_ids = [6, 9802, 14962, 2082, 831, 244]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_ids)

        reconstructed_text = tokenizer.decode(input_ids)
        self.assertEqual(reconstructed_text, normalized_text)
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A = logging.get_logger(__name__)
__A = {
"s-JoL/Open-Llama-V1": "https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json",
}
class _a ( __lowerCamelCase):
"""simple docstring"""
UpperCamelCase__ = """open-llama"""
    def __init__( self , vocab_size=100_000 , hidden_size=4_096 , intermediate_size=11_008 , num_hidden_layers=32 , num_attention_heads=32 , hidden_act="silu" , max_position_embeddings=2_048 , initializer_range=0.02 , rms_norm_eps=1e-6 , use_cache=True , pad_token_id=0 , bos_token_id=1 , eos_token_id=2 , tie_word_embeddings=False , use_memory_efficient_attention=True , hidden_dropout_prob=0.1 , attention_dropout_prob=0.1 , use_stable_embedding=True , shared_input_output_embedding=True , rope_scaling=None , **kwargs , ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        # the misspelled kwarg key below matches the historical upstream config name
        self.use_memorry_efficient_attention = kwargs.pop(
            '''use_memorry_efficient_attention''' , use_memory_efficient_attention )
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_dropout_prob = attention_dropout_prob
        self.use_stable_embedding = use_stable_embedding
        self.shared_input_output_embedding = shared_input_output_embedding
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , tie_word_embeddings=tie_word_embeddings , **kwargs , )
    def _rope_scaling_validation( self ):
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling , dict ) or len(self.rope_scaling ) != 2:
            raise ValueError(
                '''`rope_scaling` must be a dictionary with two fields, `type` and `factor`, '''
                F'got {self.rope_scaling}' )
        rope_scaling_type = self.rope_scaling.get('''type''' , None )
        rope_scaling_factor = self.rope_scaling.get('''factor''' , None )
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                F'`rope_scaling`\'s type field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}' )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor , float ) or rope_scaling_factor <= 1.0:
            raise ValueError(F'`rope_scaling`\'s factor field must be a float > 1, got {rope_scaling_factor}' )
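# For reference, the same validation logic as a standalone sketch; the dict
# shape and the allowed values mirror the checks in `_rope_scaling_validation`
# above.
def validate_rope_scaling(rope_scaling):
    if rope_scaling is None:
        return
    if not isinstance(rope_scaling, dict) or len(rope_scaling) != 2:
        raise ValueError(
            f"`rope_scaling` must be a dictionary with two fields, `type` and `factor`, got {rope_scaling}"
        )
    scaling_type = rope_scaling.get("type")
    scaling_factor = rope_scaling.get("factor")
    if scaling_type not in ("linear", "dynamic"):
        raise ValueError(f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {scaling_type}")
    if not isinstance(scaling_factor, float) or scaling_factor <= 1.0:
        raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {scaling_factor}")


validate_rope_scaling({"type": "linear", "factor": 2.0})  # passes silently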
| 706 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_fnet import FNetTokenizer
else:
    FNetTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/spiece.model",
        "google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/spiece.model",
    },
    "tokenizer_file": {
        "google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/tokenizer.json",
        "google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/fnet-base": 512,
    "google/fnet-large": 512,
}

SPIECE_UNDERLINE = "▁"
class FNetTokenizerFast(PreTrainedTokenizerFast):
    """simple docstring"""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["""input_ids""", """token_type_ids"""]
    slow_tokenizer_class = FNetTokenizer
    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=False , remove_space=True , keep_accents=True , unk_token="<unk>" , sep_token="[SEP]" , pad_token="<pad>" , cls_token="[CLS]" , mask_token="[MASK]" , **kwargs , ):
        # Mask token behave like a normal word, i.e. include the space before it and
        # is included in the raw text, there should be a match in a non-normalized sentence.
        mask_token = (
            AddedToken(mask_token , lstrip=True , rstrip=False , normalized=False )
            if isinstance(mask_token , str )
            else mask_token
        )
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , remove_space=remove_space , keep_accents=keep_accents , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , **kwargs , )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]

    def save_vocabulary( self , save_directory: str , filename_prefix: Optional[str] = None ) -> Tuple[str]:
        if not os.path.isdir(save_directory ):
            logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        return (out_vocab_file,)
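# A quick illustration of the two sequence-assembly methods above, using plain
# placeholder ids (0 = [CLS], 1 = [SEP]) instead of a real tokenizer:
_cls, _sep = [0], [1]
_ids_a, _ids_b = [10, 11, 12], [20, 21]
assert _cls + _ids_a + _sep == [0, 10, 11, 12, 1]
_pair = _cls + _ids_a + _sep + _ids_b + _sep
_type_ids = len(_cls + _ids_a + _sep) * [0] + len(_ids_b + _sep) * [1]
assert _pair == [0, 10, 11, 12, 1, 20, 21, 1] and len(_pair) == len(_type_ids)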
| 95 | 0 |
def xnor_gate(input_1, input_2):
    return 1 if input_1 == input_2 else 0
def test_xnor_gate():
assert xnor_gate(0 , 0) == 1
assert xnor_gate(0 , 1) == 0
assert xnor_gate(1 , 0) == 0
assert xnor_gate(1 , 1) == 1
if __name__ == "__main__":
print(xnor_gate(0, 0))
print(xnor_gate(0, 1))
print(xnor_gate(1, 0))
    print(xnor_gate(1, 1))
| 25 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : Any ,lowercase_ : Dict ,lowercase_ : List[str]=7 ,lowercase_ : Tuple=3 ,lowercase_ : List[str]=1_8 ,lowercase_ : Optional[Any]=3_0 ,lowercase_ : List[Any]=4_0_0 ,lowercase_ : List[Any]=True ,lowercase_ : Any=None ,lowercase_ : Optional[Any]=True ,lowercase_ : str=None ,lowercase_ : List[Any]=True ,lowercase_ : Dict=[0.5, 0.5, 0.5] ,lowercase_ : Dict=[0.5, 0.5, 0.5] ,):
lowerCAmelCase__ : Any = size if size is not None else {'''shortest_edge''': 1_8}
lowerCAmelCase__ : Union[str, Any] = crop_size if crop_size is not None else {'''height''': 1_8, '''width''': 1_8}
lowerCAmelCase__ : Dict = parent
lowerCAmelCase__ : Dict = batch_size
lowerCAmelCase__ : List[str] = num_channels
lowerCAmelCase__ : Any = image_size
lowerCAmelCase__ : Union[str, Any] = min_resolution
lowerCAmelCase__ : Dict = max_resolution
lowerCAmelCase__ : List[str] = do_resize
lowerCAmelCase__ : Optional[Any] = size
lowerCAmelCase__ : Tuple = do_center_crop
lowerCAmelCase__ : Optional[int] = crop_size
lowerCAmelCase__ : List[str] = do_normalize
lowerCAmelCase__ : Tuple = image_mean
lowerCAmelCase__ : int = image_std
def __lowerCAmelCase ( self : List[str] ):
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"do_center_crop": self.do_center_crop,
"size": self.size,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE ( a_ , unittest.TestCase ):
"""simple docstring"""
lowercase__ = LevitImageProcessor if is_vision_available() else None
def __lowerCAmelCase ( self : List[str] ):
lowerCAmelCase__ : Optional[Any] = LevitImageProcessingTester(self )
@property
def __lowerCAmelCase ( self : Any ):
return self.image_processor_tester.prepare_image_processor_dict()
def __lowerCAmelCase ( self : str ):
lowerCAmelCase__ : Optional[int] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowercase_ ,'''image_mean''' ) )
self.assertTrue(hasattr(lowercase_ ,'''image_std''' ) )
self.assertTrue(hasattr(lowercase_ ,'''do_normalize''' ) )
self.assertTrue(hasattr(lowercase_ ,'''do_resize''' ) )
self.assertTrue(hasattr(lowercase_ ,'''do_center_crop''' ) )
self.assertTrue(hasattr(lowercase_ ,'''size''' ) )
def __lowerCAmelCase ( self : Any ):
lowerCAmelCase__ : int = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size ,{'''shortest_edge''': 1_8} )
self.assertEqual(image_processor.crop_size ,{'''height''': 1_8, '''width''': 1_8} )
lowerCAmelCase__ : List[str] = self.image_processing_class.from_dict(self.image_processor_dict ,size=4_2 ,crop_size=8_4 )
self.assertEqual(image_processor.size ,{'''shortest_edge''': 4_2} )
self.assertEqual(image_processor.crop_size ,{'''height''': 8_4, '''width''': 8_4} )
def __lowerCAmelCase ( self : Union[str, Any] ):
pass
def __lowerCAmelCase ( self : Optional[int] ):
# Initialize image_processing
lowerCAmelCase__ : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCAmelCase__ : Union[str, Any] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=lowercase_ )
for image in image_inputs:
self.assertIsInstance(lowercase_ ,Image.Image )
# Test not batched input
lowerCAmelCase__ : Any = image_processing(image_inputs[0] ,return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) ,)
# Test batched
lowerCAmelCase__ : Optional[int] = image_processing(lowercase_ ,return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) ,)
def __lowerCAmelCase ( self : Union[str, Any] ):
# Initialize image_processing
lowerCAmelCase__ : str = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowerCAmelCase__ : Optional[Any] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=lowercase_ ,numpify=lowercase_ )
for image in image_inputs:
self.assertIsInstance(lowercase_ ,np.ndarray )
# Test not batched input
lowerCAmelCase__ : Tuple = image_processing(image_inputs[0] ,return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) ,)
# Test batched
lowerCAmelCase__ : Union[str, Any] = image_processing(lowercase_ ,return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) ,)
def __lowerCAmelCase ( self : int ):
# Initialize image_processing
lowerCAmelCase__ : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowerCAmelCase__ : Dict = prepare_image_inputs(self.image_processor_tester ,equal_resolution=lowercase_ ,torchify=lowercase_ )
for image in image_inputs:
self.assertIsInstance(lowercase_ ,torch.Tensor )
# Test not batched input
lowerCAmelCase__ : Dict = image_processing(image_inputs[0] ,return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) ,)
# Test batched
lowerCAmelCase__ : Optional[int] = image_processing(lowercase_ ,return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) ,)
| 450 | 0 |
"""simple docstring"""
def compute_ap(l):  # noqa: E741
    """simple docstring"""
    n = len(l)
    out_edge_count = 0
    low = [0] * n
    visited = [False] * n
    is_art = [False] * n

    def dfs(root, at, parent, out_edge_count):
        if parent == root:
            out_edge_count += 1
        visited[at] = True
        low[at] = at

        for to in l[at]:
            if to == parent:
                pass
            elif not visited[to]:
                out_edge_count = dfs(root, to, at, out_edge_count)
                low[at] = min(low[at], low[to])

                # AP found via bridge
                if at < low[to]:
                    is_art[at] = True
                # AP found via cycle
                if at == low[to]:
                    is_art[at] = True
            else:
                low[at] = min(low[at], to)
        return out_edge_count

    for i in range(n):
        if not visited[i]:
            out_edge_count = 0
            out_edge_count = dfs(i, i, -1, out_edge_count)
            is_art[i] = out_edge_count > 1

    for x in range(len(is_art)):
        if is_art[x] is True:
            print(x)
# Adjacency list of graph
data = {
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
}
compute_ap(data)
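# For comparison, a sketch of the textbook Tarjan variant that uses explicit
# discovery times as low-link values instead of the vertex-id trick above; on
# the sample graph it should agree with the printout above (2, 3 and 5).
def articulation_points(graph):
    n = len(graph)
    disc = [0] * n
    low = [0] * n
    visited = [False] * n
    result = set()
    timer = [1]

    def dfs(at, parent):
        visited[at] = True
        disc[at] = low[at] = timer[0]
        timer[0] += 1
        children = 0
        for to in graph[at]:
            if to == parent:
                continue
            if visited[to]:
                low[at] = min(low[at], disc[to])
            else:
                children += 1
                dfs(to, at)
                low[at] = min(low[at], low[to])
                if parent != -1 and low[to] >= disc[at]:
                    result.add(at)
        if parent == -1 and children > 1:
            result.add(at)

    for v in range(n):
        if not visited[v]:
            dfs(v, -1)
    return result


print(sorted(articulation_points(data)))  # [2, 3, 5]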
| 704 |
"""simple docstring"""
from __future__ import annotations
from math import gcd
def pollard_rho( num: int , seed: int = 2 , step: int = 1 , attempts: int = 3 , ) -> int | None:
"""simple docstring"""
if num < 2:
raise ValueError("""The input value cannot be less than 2""" )
# Because of the relationship between ``f(f(x))`` and ``f(x)``, this
# algorithm struggles to find factors that are divisible by two.
# As a workaround, we specifically check for two and even inputs.
# See: https://math.stackexchange.com/a/2856214/165820
if num > 2 and num % 2 == 0:
return 2
# Pollard's Rho algorithm requires a function that returns pseudorandom
# values between 0 <= X < ``num``. It doesn't need to be random in the
# sense that the output value is cryptographically secure or difficult
# to calculate, it only needs to be random in the sense that all output
# values should be equally likely to appear.
# For this reason, Pollard suggested using ``f(x) = (x**2 - 1) % num``
# However, the success of Pollard's algorithm isn't guaranteed and is
# determined in part by the initial seed and the chosen random function.
# To make retries easier, we will instead use ``f(x) = (x**2 + C) % num``
# where ``C`` is a value that we can modify between each attempt.
    def rand_fn(value: int , step: int , modulus: int ) -> int:
        return (pow(value , 2 ) + step) % modulus
    for _ in range(attempts ):
        # These track the position within the cycle detection logic.
        tortoise = seed
        hare = seed
        while True:
            # At each iteration, the tortoise moves one step and the hare moves two.
            tortoise = rand_fn(tortoise , step , num )
            hare = rand_fn(hare , step , num )
            hare = rand_fn(hare , step , num )
# At some point both the tortoise and the hare will enter a cycle whose
# length ``p`` is a divisor of ``num``. Once in that cycle, at some point
# the tortoise and hare will end up on the same value modulo ``p``.
# We can detect when this happens because the position difference between
# the tortoise and the hare will share a common divisor with ``num``.
            divisor = gcd(hare - tortoise , num )
if divisor == 1:
# No common divisor yet, just keep searching.
continue
else:
# We found a common divisor!
if divisor == num:
# Unfortunately, the divisor is ``num`` itself and is useless.
break
else:
# The divisor is a nontrivial factor of ``num``!
return divisor
# If we made it here, then this attempt failed.
# We need to pick a new starting seed for the tortoise and hare
# in addition to a new step value for the random function.
# To keep this example implementation deterministic, the
# new values will be generated based on currently available
# values instead of using something like ``random.randint``.
# We can use the hare's position as the new seed.
# This is actually what Richard Brent's the "optimized" variant does.
        seed = hare
# The new step value for the random function can just be incremented.
# At first the results will be similar to what the old function would
# have produced, but the value will quickly diverge after a bit.
step += 1
# We haven't found a divisor within the requested number of attempts.
# We were unlucky or ``num`` itself is actually prime.
return None
if __name__ == "__main__":
import argparse
    parser = argparse.ArgumentParser()
parser.add_argument(
"num",
type=int,
help="The value to find a divisor of",
)
parser.add_argument(
"--attempts",
type=int,
default=3,
help="The number of attempts before giving up",
)
    args = parser.parse_args()
    divisor = pollard_rho(args.num, attempts=args.attempts)
if divisor is None:
print(F"{args.num} is probably prime")
else:
        quotient = args.num // divisor
print(F"{args.num} = {divisor} * {quotient}")
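    # Quick sanity check (sketch): 8051 = 83 * 97 is the classic worked example
    # for Pollard's rho, and the defaults above (seed=2, step=1) find 97.
    assert pollard_rho(8051) in (83, 97)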
| 674 | 0 |
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
UniSpeechConfig,
UniSpeechForCTC,
UniSpeechForPreTraining,
WavaVecaFeatureExtractor,
WavaVecaPhonemeCTCTokenizer,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
__SCREAMING_SNAKE_CASE : Optional[int] = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE : Union[str, Any] = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """ctc_proj""",
"""mask_emb""": """masked_spec_embed""",
}
__SCREAMING_SNAKE_CASE : Tuple = [
"""ctc_proj""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
]
def UpperCamelCase_ ( _UpperCAmelCase : List[str] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Tuple , _UpperCAmelCase : Dict , _UpperCAmelCase : List[Any] , _UpperCAmelCase : str ) -> Optional[int]:
"""simple docstring"""
for attribute in key.split("." ):
if is_finetuned:
if attribute in ["quantizer", "project_q", "project_hid"]:
# those layers are only relevant for pretraining and should be dropped
return
if attribute == "ctc_proj":
# we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models
_UpperCAmelCase : Tuple = "lm_head"
_UpperCAmelCase : List[str] = getattr(_UpperCAmelCase , _UpperCAmelCase )
if weight_type is not None:
_UpperCAmelCase : Dict = getattr(_UpperCAmelCase , _UpperCAmelCase ).shape
else:
_UpperCAmelCase : Tuple = hf_pointer.shape
assert hf_shape == value.shape, (
F"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
F""" {value.shape} for {full_name}"""
)
if weight_type == "weight":
_UpperCAmelCase : Tuple = value
elif weight_type == "weight_g":
_UpperCAmelCase : int = value
elif weight_type == "weight_v":
_UpperCAmelCase : Dict = value
elif weight_type == "bias":
_UpperCAmelCase : Tuple = value
else:
_UpperCAmelCase : List[str] = value
logger.info(F"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""" )
def UpperCamelCase_ ( _UpperCAmelCase : Dict , _UpperCAmelCase : List[str] , _UpperCAmelCase : Dict ) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase : Dict = []
_UpperCAmelCase : Union[str, Any] = fairseq_model.state_dict()
_UpperCAmelCase : List[str] = hf_model.unispeech.feature_extractor
for name, value in fairseq_dict.items():
_UpperCAmelCase : Union[str, Any] = False
if "conv_layers" in name:
load_conv_layer(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , hf_model.config.feat_extract_norm == "group" , )
_UpperCAmelCase : Tuple = True
else:
for key, mapped_key in MAPPING.items():
_UpperCAmelCase : Optional[int] = "unispeech." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
_UpperCAmelCase : int = True
if "*" in mapped_key:
_UpperCAmelCase : Tuple = name.split(_UpperCAmelCase )[0].split("." )[-2]
_UpperCAmelCase : str = mapped_key.replace("*" , _UpperCAmelCase )
if "weight_g" in name:
_UpperCAmelCase : int = "weight_g"
elif "weight_v" in name:
_UpperCAmelCase : Union[str, Any] = "weight_v"
elif "bias" in name:
_UpperCAmelCase : Optional[Any] = "bias"
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
_UpperCAmelCase : List[str] = "weight"
else:
_UpperCAmelCase : Optional[int] = None
set_recursively(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
continue
if not is_used:
unused_weights.append(_UpperCAmelCase )
logger.warning(F"""Unused weights: {unused_weights}""" )
def UpperCamelCase_ ( _UpperCAmelCase : List[str] , _UpperCAmelCase : Tuple , _UpperCAmelCase : int , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Dict ) -> int:
"""simple docstring"""
_UpperCAmelCase : Optional[int] = full_name.split("conv_layers." )[-1]
_UpperCAmelCase : Optional[Any] = name.split("." )
_UpperCAmelCase : Union[str, Any] = int(items[0] )
_UpperCAmelCase : List[str] = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
)
_UpperCAmelCase : Any = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
)
_UpperCAmelCase : List[str] = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"""
" found."
)
_UpperCAmelCase : Any = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."""
)
_UpperCAmelCase : List[str] = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(_UpperCAmelCase )
@torch.no_grad()
def UpperCamelCase_ ( _UpperCAmelCase : int , _UpperCAmelCase : List[str] , _UpperCAmelCase : str=None , _UpperCAmelCase : Optional[Any]=None , _UpperCAmelCase : Union[str, Any]=True ) -> List[str]:
"""simple docstring"""
if config_path is not None:
_UpperCAmelCase : Optional[Any] = UniSpeechConfig.from_pretrained(_UpperCAmelCase )
else:
_UpperCAmelCase : str = UniSpeechConfig()
if is_finetuned:
if dict_path:
_UpperCAmelCase : Tuple = Dictionary.load_from_json(_UpperCAmelCase )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
_UpperCAmelCase : str = target_dict.pad_index
_UpperCAmelCase : Union[str, Any] = target_dict.bos_index
_UpperCAmelCase : str = target_dict.eos_index
_UpperCAmelCase : int = len(target_dict.symbols )
_UpperCAmelCase : int = os.path.join(_UpperCAmelCase , "vocab.json" )
if not os.path.isdir(_UpperCAmelCase ):
logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(_UpperCAmelCase ) )
return
os.makedirs(_UpperCAmelCase , exist_ok=_UpperCAmelCase )
_UpperCAmelCase : Tuple = target_dict.indices
# fairseq has the <pad> and <s> switched
_UpperCAmelCase : Optional[int] = 42
_UpperCAmelCase : List[str] = 43
with open(_UpperCAmelCase , "w" , encoding="utf-8" ) as vocab_handle:
json.dump(_UpperCAmelCase , _UpperCAmelCase )
_UpperCAmelCase : str = WavaVecaPhonemeCTCTokenizer(
_UpperCAmelCase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="|" , do_lower_case=_UpperCAmelCase , )
_UpperCAmelCase : List[Any] = True if config.feat_extract_norm == "layer" else False
_UpperCAmelCase : Dict = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=_UpperCAmelCase , return_attention_mask=_UpperCAmelCase , )
_UpperCAmelCase : Union[str, Any] = WavaVecaProcessor(feature_extractor=_UpperCAmelCase , tokenizer=_UpperCAmelCase )
processor.save_pretrained(_UpperCAmelCase )
_UpperCAmelCase : Union[str, Any] = UniSpeechForCTC(_UpperCAmelCase )
else:
_UpperCAmelCase : Any = UniSpeechForPreTraining(_UpperCAmelCase )
if is_finetuned:
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Optional[int] = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] ), "w2v_path": checkpoint_path} )
else:
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Any = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
_UpperCAmelCase : Dict = model[0].eval()
recursively_load_weights(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
hf_unispeech.save_pretrained(_UpperCAmelCase )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : str = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
__SCREAMING_SNAKE_CASE : Optional[Any] = parser.parse_args()
convert_unispeech_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
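    # A programmatic call would look roughly like this sketch (hypothetical
    # paths; the argument roles mirror the CLI flags wired up above):
    #
    #   convert_unispeech_checkpoint(
    #       "/path/to/fairseq_checkpoint.pt",  # --checkpoint_path
    #       "/path/to/output_dir",             # --pytorch_dump_folder_path
    #       None,                              # --config_path (default UniSpeechConfig)
    #       None,                              # --dict_path
    #       True,                              # fine-tuned model (no --not_finetuned)
    #   )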
| 244 |
'''simple docstring'''
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
pkgs_to_check_at_runtime = """python tqdm regex requests packaging filelock numpy tokenizers""".split()
if sys.version_info < (3, 7):
pkgs_to_check_at_runtime.append("""dataclasses""")
if sys.version_info < (3, 8):
pkgs_to_check_at_runtime.append("""importlib_metadata""")
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(F'can\'t find {pkg} in {deps.keys()}, check dependency_versions_table.py')
def dep_version_check(pkg , hint=None ):
    """simple docstring"""
    require_version(deps[pkg] , hint )
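# A minimal sketch of what such a runtime check boils down to: compare the
# installed version of a package against a pin using `packaging` (the real
# `require_version_core` additionally hints at the command to run to upgrade).
from importlib.metadata import version as _installed_version

from packaging import version as _packaging_version


def _require_minimum(pkg, minimum):
    got = _installed_version(pkg)
    if _packaging_version.parse(got) < _packaging_version.parse(minimum):
        raise ImportError(f"{pkg}>={minimum} is required, found {pkg}=={got}")


_require_minimum("packaging", "20.0")  # example pin; the real pins live in `deps`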
| 244 | 1 |
from collections.abc import Sequence
def max_subarray_sum(arr: Sequence[float] , allow_empty_subarrays: bool = False ):
    '''simple docstring'''
    if not arr:
        return 0
    max_sum = 0 if allow_empty_subarrays else float("-inf" )
    curr_sum = 0.0
    for num in arr:
        curr_sum = max(0 if allow_empty_subarrays else num , curr_sum + num )
        max_sum = max(max_sum , curr_sum )
    return max_sum
if __name__ == "__main__":
from doctest import testmod
testmod()
    nums = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
print(f"""{max_subarray_sum(nums) = }""")
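    # Quick checks (sketch): the maximum subarray of `nums` is [4, -1, 2, 1]
    # with sum 6, and `allow_empty_subarrays` controls the all-negative case.
    assert max_subarray_sum(nums) == 6
    assert max_subarray_sum([-1, -2, -3]) == -1
    assert max_subarray_sum([-1, -2, -3], allow_empty_subarrays=True) == 0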
| 601 |
import logging
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import librosa
import torch
from datasets import DatasetDict, load_dataset
from packaging import version
from torch import nn
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaForPreTraining,
is_apex_available,
trainer_utils,
)
from transformers.models.wavaveca.modeling_wavaveca import _compute_mask_indices
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse("""1.6"""):
SCREAMING_SNAKE_CASE__ = True
from torch.cuda.amp import autocast
SCREAMING_SNAKE_CASE__ = logging.getLogger(__name__)
@dataclass
class __lowerCamelCase :
"""simple docstring"""
lowerCAmelCase__ = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
lowerCAmelCase__ = field(
default=snake_case_ , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
lowerCAmelCase__ = field(
default=snake_case_ , metadata={"help": "Whether to freeze the feature extractor layers of the model."} )
lowerCAmelCase__ = field(
default=snake_case_ , metadata={"help": "Whether to log verbose messages or not."} , )
lowerCAmelCase__ = field(
default=2.0 , metadata={"help": "Maximum temperature for gumbel softmax."} )
lowerCAmelCase__ = field(
default=0.5 , metadata={"help": "Minimum temperature for gumbel softmax."} )
lowerCAmelCase__ = field(
default=0.999_995 , metadata={"help": "Decay of gumbel temperature during training."} )
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: ModelArguments , __lowerCamelCase: TrainingArguments ):
'''simple docstring'''
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
lowercase_ = logging.WARNING
if model_args.verbose_logging:
lowercase_ = logging.DEBUG
elif trainer_utils.is_main_process(training_args.local_rank ):
lowercase_ = logging.INFO
logger.setLevel(__lowerCamelCase )
@dataclass
class __lowerCamelCase :
"""simple docstring"""
lowerCAmelCase__ = field(
default=snake_case_ , metadata={"help": "The name of the dataset to use (via the datasets library)."} )
lowerCAmelCase__ = field(
default=snake_case_ , metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} )
lowerCAmelCase__ = field(
default="train" , metadata={
"help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
} , )
lowerCAmelCase__ = field(
default="validation" , metadata={
"help": (
"The name of the validation data set split to use (via the datasets library). Defaults to 'validation'"
)
} , )
lowerCAmelCase__ = field(
default="file" , metadata={"help": "Column in the dataset that contains speech file path. Defaults to 'file'"} , )
lowerCAmelCase__ = field(
default=snake_case_ , metadata={"help": "Overwrite the cached preprocessed datasets or not."} )
lowerCAmelCase__ = field(
default=1 , metadata={
"help": "The percentage of the train set used as validation set in case there's no validation split"
} , )
lowerCAmelCase__ = field(
default=snake_case_ , metadata={"help": "The number of processes to use for the preprocessing."} , )
lowerCAmelCase__ = field(
default=20.0 , metadata={"help": "Filter audio files that are longer than `max_duration_in_seconds` seconds"} )
@dataclass
class __lowerCamelCase :
"""simple docstring"""
lowerCAmelCase__ = 42
lowerCAmelCase__ = 42
lowerCAmelCase__ = "longest"
lowerCAmelCase__ = None
lowerCAmelCase__ = None
def __call__( self , UpperCAmelCase ) -> Dict[str, torch.Tensor]:
'''simple docstring'''
lowercase_ = self.feature_extractor.pad(
UpperCAmelCase , max_length=self.max_length , padding=self.padding , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="pt" , )
lowercase_ = self.model._get_feat_extract_output_lengths(batch["input_values"].shape[-1] )
lowercase_ = batch["input_values"].shape[0]
# make sure that no loss is computed on padded inputs
if batch["attention_mask"] is not None:
# compute real output lengths according to convolution formula
lowercase_ = self.model._get_feat_extract_output_lengths(batch["attention_mask"].sum(-1 ) ).to(
torch.long )
lowercase_ = torch.zeros(
(batch_size, mask_indices_seq_length) , dtype=torch.long , device=batch["input_values"].device )
# these two operations makes sure that all values
# before the output lengths indices are attended to
lowercase_ = 1
lowercase_ = attention_mask.flip([-1] ).cumsum(-1 ).flip([-1] ).bool()
# sample randomly masked indices
lowercase_ = _compute_mask_indices(
(batch_size, mask_indices_seq_length) , self.model.config.mask_time_prob , self.model.config.mask_time_length , attention_mask=UpperCAmelCase , min_masks=2 , )
return batch
class __lowerCamelCase ( snake_case_ ):
"""simple docstring"""
def __init__( self , *UpperCAmelCase , UpperCAmelCase=1 , UpperCAmelCase=0 , UpperCAmelCase=1.0 , **UpperCAmelCase ) -> Optional[Any]:
'''simple docstring'''
super().__init__(*UpperCAmelCase , **UpperCAmelCase )
lowercase_ = 0
lowercase_ = max_gumbel_temp
lowercase_ = min_gumbel_temp
lowercase_ = gumbel_temp_decay
def A__ ( self , UpperCAmelCase , UpperCAmelCase ) -> torch.Tensor:
'''simple docstring'''
model.train()
lowercase_ = self._prepare_inputs(UpperCAmelCase )
if self.use_amp:
with autocast():
lowercase_ = self.compute_loss(UpperCAmelCase , UpperCAmelCase )
else:
lowercase_ = self.compute_loss(UpperCAmelCase , UpperCAmelCase )
if self.args.n_gpu > 1 or self.deepspeed:
if model.module.config.ctc_loss_reduction == "mean":
lowercase_ = loss.mean()
elif model.module.config.ctc_loss_reduction == "sum":
lowercase_ = loss.sum() / (inputs["mask_time_indices"]).sum()
else:
raise ValueError(F'{model.config.ctc_loss_reduction} is not valid. Choose one of [\'mean\', \'sum\']' )
if self.args.gradient_accumulation_steps > 1:
lowercase_ = loss / self.args.gradient_accumulation_steps
if self.use_amp:
self.scaler.scale(UpperCAmelCase ).backward()
elif self.use_apex:
with amp.scale_loss(UpperCAmelCase , self.optimizer ) as scaled_loss:
scaled_loss.backward()
elif self.deepspeed:
self.deepspeed.backward(UpperCAmelCase )
else:
loss.backward()
self.num_update_step += 1
# make sure gumbel softmax temperature is decayed
if self.args.n_gpu > 1 or self.deepspeed:
model.module.set_gumbel_temperature(
max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step , self.min_gumbel_temp ) )
else:
model.set_gumbel_temperature(
max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step , self.min_gumbel_temp ) )
return loss.detach()
def SCREAMING_SNAKE_CASE_ ( ):
'''simple docstring'''
lowercase_ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
lowercase_ , lowercase_ , lowercase_ = parser.parse_args_into_dataclasses()
configure_logger(__lowerCamelCase , __lowerCamelCase )
# Downloading and loading a dataset from the hub.
lowercase_ = load_dataset(data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir )
if "validation" not in datasets.keys():
# make sure only "validation" and "train" keys remain"
lowercase_ = DatasetDict()
lowercase_ = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=F'{data_args.train_split_name}[:{data_args.validation_split_percentage}%]' , cache_dir=model_args.cache_dir , )
lowercase_ = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=F'{data_args.train_split_name}[{data_args.validation_split_percentage}%:]' , cache_dir=model_args.cache_dir , )
else:
# make sure only "validation" and "train" keys remain"
lowercase_ = DatasetDict()
lowercase_ = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split="validation" , cache_dir=model_args.cache_dir , )
lowercase_ = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=F'{data_args.train_split_name}' , cache_dir=model_args.cache_dir , )
# only normalized-inputs-training is supported
lowercase_ = WavaVecaFeatureExtractor.from_pretrained(
model_args.model_name_or_path , cache_dir=model_args.cache_dir , do_normalize=__lowerCamelCase )
def prepare_dataset(__lowerCamelCase: Dict ):
# check that all files have the correct sampling rate
lowercase_ , lowercase_ = librosa.load(batch[data_args.speech_file_column] , sr=feature_extractor.sampling_rate )
return batch
# load audio files into numpy arrays
lowercase_ = datasets.map(
__lowerCamelCase , num_proc=data_args.preprocessing_num_workers , remove_columns=datasets["train"].column_names )
# filter audio files that are too long
lowercase_ = vectorized_datasets.filter(
lambda __lowerCamelCase : len(data["speech"] ) < int(data_args.max_duration_in_seconds * feature_extractor.sampling_rate ) )
def normalize(__lowerCamelCase: Optional[Any] ):
return feature_extractor(batch["speech"] , sampling_rate=feature_extractor.sampling_rate )
# normalize and transform to `BatchFeatures`
lowercase_ = vectorized_datasets.map(
__lowerCamelCase , batched=__lowerCamelCase , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , remove_columns=vectorized_datasets["train"].column_names , )
# pretraining is only supported for "newer" stable layer norm architecture
# apply_spec_augment has to be True, mask_feature_prob has to be 0.0
lowercase_ = WavaVecaConfig.from_pretrained(
model_args.model_name_or_path , cache_dir=model_args.cache_dir , gradient_checkpointing=training_args.gradient_checkpointing , )
if not config.do_stable_layer_norm or config.feat_extract_norm != "layer":
raise ValueError(
"PreTraining is only supported for ``config.do_stable_layer_norm=True`` and"
" ``config.feat_extract_norm='layer'" )
lowercase_ = WavaVecaForPreTraining(__lowerCamelCase )
lowercase_ = DataCollatorForWavaVecaPretraining(model=__lowerCamelCase , feature_extractor=__lowerCamelCase )
lowercase_ = WavaVecaPreTrainer(
model=__lowerCamelCase , data_collator=__lowerCamelCase , args=__lowerCamelCase , train_dataset=vectorized_datasets["train"] , eval_dataset=vectorized_datasets["validation"] , tokenizer=__lowerCamelCase , max_gumbel_temp=model_args.max_gumbel_temperature , min_gumbel_temp=model_args.min_gumbel_temperature , gumbel_temp_decay=model_args.gumbel_temperature_decay , )
trainer.train()
if __name__ == "__main__":
main()
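# The gumbel temperature schedule used in `training_step` above decays
# geometrically and is clamped at a floor; a standalone sketch with the
# default ModelArguments values:
def _gumbel_temperature(step, max_temp=2.0, min_temp=0.5, decay=0.999995):
    return max(max_temp * decay**step, min_temp)


assert _gumbel_temperature(0) == 2.0
assert _gumbel_temperature(10**7) == 0.5  # decayed far below the floor -> clamped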
| 601 | 1 |
'''simple docstring'''
# Algorithm for the pigeonhole sorting
def pigeonhole_sort(a):
    min_val = min(a)  # min() finds the minimum value
    max_val = max(a)  # max() finds the maximum value

    size = max_val - min_val + 1  # size is difference of max and min values plus one

    # list of pigeonholes of size equal to the variable size
    holes = [0] * size

    # Populate the pigeonholes.
    for x in a:
        assert isinstance(x, int), "integers only please"
        holes[x - min_val] += 1

    # Putting the elements back into the array in an order.
    i = 0
    for count in range(size):
        while holes[count] > 0:
            holes[count] -= 1
            a[i] = count + min_val
            i += 1


def main():
    a = [8, 3, 2, 7, 4, 6, 8]
    pigeonhole_sort(a)
    print("""Sorted order is:""" , """ """.join(str(x) for x in a) )
if __name__ == "__main__":
main()
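    # Quick check (sketch): pigeonhole sort is O(n + value range), so it only
    # suits inputs whose range is small relative to their length.
    _sample = [8, 3, 2, 7, 4, 6, 8]
    pigeonhole_sort(_sample)
    assert _sample == sorted([8, 3, 2, 7, 4, 6, 8])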
| 369 |
def method_1(boundary, steps):
    '''simple docstring'''
    h = (boundary[1] - boundary[0]) / steps
    a = boundary[0]
    b = boundary[1]
    x_i = make_points(a, b, h)
    y = 0.0
    y += (h / 2.0) * f(a)
    for i in x_i:
        # print(i)
        y += h * f(i)
    y += (h / 2.0) * f(b)
    return y


def make_points(a, b, h):
    '''simple docstring'''
    x = a + h
    while x < (b - h):
        yield x
        x = x + h


def f(x):  # enter your function here
    '''simple docstring'''
    y = (x - 0) * (x - 0)
    return y


def main():
    '''simple docstring'''
    a = 0.0  # Lower bound of integration
    b = 1.0  # Upper bound of integration
    steps = 10.0  # define number of steps or resolution
    boundary = [a, b]  # define boundary of integration
    y = method_1(boundary, steps)
    print(f"""y = {y}""")
if __name__ == "__main__":
main()
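    # Quick check (sketch): for f(x) = x^2 on [0, 1] the exact integral is 1/3,
    # and the 10-step trapezoidal estimate above lands near 0.335.
    approx = method_1([0.0, 1.0], 10.0)
    assert abs(approx - 1.0 / 3.0) < 5e-3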
| 647 | 0 |
"""simple docstring"""
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
lowerCAmelCase__ : Union[str, Any] = False
class snake_case ( unittest.TestCase ):
"""simple docstring"""
pass
@nightly
@require_torch_gpu
class snake_case ( unittest.TestCase ):
"""simple docstring"""
def __lowerCAmelCase ( self : Any ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCAmelCase ( self : Any ):
UpperCAmelCase__ = VersatileDiffusionPipeline.from_pretrained('shi-labs/versatile-diffusion' ,torch_dtype=torch.floataa )
pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
UpperCAmelCase__ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg' )
UpperCAmelCase__ = torch.manual_seed(0 )
UpperCAmelCase__ = pipe.dual_guided(
prompt='first prompt' ,image=lowerCamelCase__ ,text_to_image_strength=0.7_5 ,generator=lowerCamelCase__ ,guidance_scale=7.5 ,num_inference_steps=2 ,output_type='numpy' ,).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(lowerCamelCase__ )
UpperCAmelCase__ = VersatileDiffusionPipeline.from_pretrained(lowerCamelCase__ ,torch_dtype=torch.floataa )
pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
UpperCAmelCase__ = generator.manual_seed(0 )
UpperCAmelCase__ = pipe.dual_guided(
prompt='first prompt' ,image=lowerCamelCase__ ,text_to_image_strength=0.7_5 ,generator=lowerCamelCase__ ,guidance_scale=7.5 ,num_inference_steps=2 ,output_type='numpy' ,).images
assert np.abs(image - new_image ).sum() < 1e-5, "Models don't have the same forward pass"
def __lowerCAmelCase ( self : Optional[int] ):
UpperCAmelCase__ = VersatileDiffusionPipeline.from_pretrained('shi-labs/versatile-diffusion' ,torch_dtype=torch.floataa )
pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
UpperCAmelCase__ = 'cyberpunk 2077'
UpperCAmelCase__ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg' )
UpperCAmelCase__ = torch.manual_seed(0 )
UpperCAmelCase__ = pipe.dual_guided(
prompt=lowerCamelCase__ ,image=lowerCamelCase__ ,text_to_image_strength=0.7_5 ,generator=lowerCamelCase__ ,guidance_scale=7.5 ,num_inference_steps=50 ,output_type='numpy' ,).images
UpperCAmelCase__ = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
UpperCAmelCase__ = np.array([0.1_4_4_8, 0.1_6_1_9, 0.1_7_4_1, 0.1_0_8_6, 0.1_1_4_7, 0.1_1_2_8, 0.1_1_9_9, 0.1_1_6_5, 0.1_0_0_1] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
UpperCAmelCase__ = 'A painting of a squirrel eating a burger '
UpperCAmelCase__ = torch.manual_seed(0 )
UpperCAmelCase__ = pipe.text_to_image(
prompt=lowerCamelCase__ ,generator=lowerCamelCase__ ,guidance_scale=7.5 ,num_inference_steps=50 ,output_type='numpy' ).images
UpperCAmelCase__ = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
UpperCAmelCase__ = np.array([0.3_3_6_7, 0.3_1_6_9, 0.2_6_5_6, 0.3_8_7_0, 0.4_7_9_0, 0.3_7_9_6, 0.4_0_0_9, 0.4_8_7_8, 0.4_7_7_8] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
UpperCAmelCase__ = pipe.image_variation(lowerCamelCase__ ,generator=lowerCamelCase__ ,output_type='numpy' ).images
UpperCAmelCase__ = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
UpperCAmelCase__ = np.array([0.3_0_7_6, 0.3_1_2_3, 0.3_2_8_4, 0.3_7_8_2, 0.3_7_7_0, 0.3_8_9_4, 0.4_2_9_7, 0.4_3_3_1, 0.4_4_5_6] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
| 632 |
"""simple docstring"""
import unittest
from transformers.models.xlm_prophetnet.tokenization_xlm_prophetnet import SPIECE_UNDERLINE, XLMProphetNetTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
lowerCAmelCase__ : Any = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
class snake_case ( __UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
snake_case__ = XLMProphetNetTokenizer
snake_case__ = False
snake_case__ = True
def __lowerCAmelCase ( self : Any ):
super().setUp()
# We have a SentencePiece fixture for testing
UpperCAmelCase__ = XLMProphetNetTokenizer(lowerCamelCase__ ,keep_accents=lowerCamelCase__ )
tokenizer.save_pretrained(self.tmpdirname )
def __lowerCAmelCase ( self : Dict ):
UpperCAmelCase__ = '[PAD]'
UpperCAmelCase__ = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCamelCase__ ) ,lowerCamelCase__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCamelCase__ ) ,lowerCamelCase__ )
def __lowerCAmelCase ( self : List[str] ):
UpperCAmelCase__ = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] ,'[PAD]' )
self.assertEqual(vocab_keys[1] ,'[CLS]' )
self.assertEqual(vocab_keys[-1] ,'j' )
self.assertEqual(len(lowerCamelCase__ ) ,1_012 )
def __lowerCAmelCase ( self : List[Any] ):
self.assertEqual(self.get_tokenizer().vocab_size ,1_012 )
def __lowerCAmelCase ( self : str ):
UpperCAmelCase__ = XLMProphetNetTokenizer(lowerCamelCase__ ,keep_accents=lowerCamelCase__ )
UpperCAmelCase__ = tokenizer.tokenize('This is a test' )
self.assertListEqual(lowerCamelCase__ ,['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCamelCase__ ) ,[value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] ,)
UpperCAmelCase__ = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
lowerCamelCase__ ,[
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] ,)
UpperCAmelCase__ = tokenizer.convert_tokens_to_ids(lowerCamelCase__ )
self.assertListEqual(
lowerCamelCase__ ,[
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, -9, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, -9, 4]
] ,)
UpperCAmelCase__ = tokenizer.convert_ids_to_tokens(lowerCamelCase__ )
self.assertListEqual(
lowerCamelCase__ ,[
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'[UNK]',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'[UNK]',
'.',
] ,)
@cached_property
def __lowerCAmelCase ( self : Dict ):
return XLMProphetNetTokenizer.from_pretrained('microsoft/xprophetnet-large-wiki100-cased' )
@slow
def __lowerCAmelCase ( self : Optional[Any] ):
UpperCAmelCase__ = 'Hello World!'
UpperCAmelCase__ = [35_389, 6_672, 49, 2]
self.assertListEqual(lowerCamelCase__ ,self.big_tokenizer.encode(lowerCamelCase__ ) )
@slow
def __lowerCAmelCase ( self : List[str] ):
# fmt: off
UpperCAmelCase__ = {'input_ids': [[11_073, 82_783, 18, 26, 82_783, 549, 51_540, 248, 17_209, 1_301, 217, 20, 215_186, 1_325, 147, 17_209, 1_301, 217, 20, 56_370, 53, 122_020, 20, 16_477, 27, 87_355, 4_548, 20, 4_728, 78_392, 17, 159_969, 18, 26, 24_491, 629, 15, 538, 22_704, 5_439, 15, 2_788, 24_491, 9_885, 15, 43_534, 605, 15, 814, 18_403, 33_200, 29, 15, 43_534, 24_458, 12_410, 111, 24_966, 83_669, 9_637, 144_068, 26, 850, 22_346, 27, 147, 24_966, 83_669, 83_490, 26, 39_113, 735, 27, 689, 656, 2_800, 1_339, 4_600, 53, 122_020, 115_785, 34, 816, 1_339, 46_887, 18, 147, 53_905, 1_951, 42_238, 41_170, 17_732, 834, 436, 15, 27_523, 98_733, 217, 147, 5_542, 4_981, 930, 17_347, 16, 2], [20_091, 629, 94, 82_786, 58, 490, 20, 1_528, 84, 53_905, 344, 80_592, 110_128, 18_822, 5_267, 1_306, 62, 152_537, 308, 7_997, 401, 124_427, 549, 35_442, 225, 109, 15_055, 25_748, 147, 7_119, 43_712, 34, 767, 135_366, 18, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [592, 63_784, 119_466, 17, 147_808, 88_214, 18, 656, 81, 32, 3_296, 10_280, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowerCamelCase__ ,model_name='microsoft/xprophetnet-large-wiki100-cased' ,revision='1acad1643ddd54a44df6a1b797ada8373685d90e' ,)
| 632 | 1 |
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
@require_flax
class lowercase__( unittest.TestCase ):
"""simple docstring"""
def _lowercase ( self : str , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int ) -> Dict:
lowercase_ = jnp.ones((batch_size, length) ) / length
return scores
def _lowercase ( self : List[Any] ) -> List[Any]:
lowercase_ = None
lowercase_ = 2_0
lowercase_ = self._get_uniform_logits(batch_size=2 , length=SCREAMING_SNAKE_CASE_ )
# tweak scores to not be uniform anymore
lowercase_ = scores.at[1, 5].set((1 / length) + 0.1 ) # peak, 1st batch
lowercase_ = scores.at[1, 1_0].set((1 / length) - 0.4 ) # valley, 1st batch
# compute softmax
lowercase_ = jax.nn.softmax(SCREAMING_SNAKE_CASE_ , axis=-1 )
lowercase_ = FlaxTemperatureLogitsWarper(temperature=0.5 )
lowercase_ = FlaxTemperatureLogitsWarper(temperature=1.3 )
lowercase_ = jax.nn.softmax(temp_dist_warper_sharper(SCREAMING_SNAKE_CASE_ , scores.copy() , cur_len=SCREAMING_SNAKE_CASE_ ) , axis=-1 )
lowercase_ = jax.nn.softmax(temp_dist_warper_smoother(SCREAMING_SNAKE_CASE_ , scores.copy() , cur_len=SCREAMING_SNAKE_CASE_ ) , axis=-1 )
# uniform distribution stays uniform
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_sharp[0, :] , atol=1e-3 ) )
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_smooth[0, :] , atol=1e-3 ) )
# sharp peaks get higher, valleys get lower
self.assertLess(probs[1, :].max() , warped_prob_sharp[1, :].max() )
self.assertGreater(probs[1, :].min() , warped_prob_sharp[1, :].min() )
# smooth peaks get lower, valleys get higher
self.assertGreater(probs[1, :].max() , warped_prob_smooth[1, :].max() )
self.assertLess(probs[1, :].min() , warped_prob_smooth[1, :].min() )
def _lowercase ( self : Optional[Any] ) -> List[str]:
lowercase_ = None
lowercase_ = 1_0
lowercase_ = 2
# create ramp distribution
lowercase_ = np.broadcast_to(np.arange(SCREAMING_SNAKE_CASE_ )[None, :] , (batch_size, vocab_size) ).copy()
lowercase_ = ramp_logits[1:, : vocab_size // 2] + vocab_size
lowercase_ = FlaxTopKLogitsWarper(3 )
lowercase_ = top_k_warp(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , cur_len=SCREAMING_SNAKE_CASE_ )
# check that correct tokens are filtered
self.assertListEqual(jnp.isinf(scores[0] ).tolist() , 7 * [True] + 3 * [False] )
self.assertListEqual(jnp.isinf(scores[1] ).tolist() , 2 * [True] + 3 * [False] + 5 * [True] )
# check special case
lowercase_ = 5
lowercase_ = FlaxTopKLogitsWarper(top_k=1 , filter_value=0.0 , min_tokens_to_keep=3 )
lowercase_ = np.broadcast_to(np.arange(SCREAMING_SNAKE_CASE_ )[None, :] , (batch_size, length) ).copy()
lowercase_ = top_k_warp_safety_check(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , cur_len=SCREAMING_SNAKE_CASE_ )
# min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
self.assertListEqual((scores == 0.0).sum(axis=-1 ).tolist() , [2, 2] )
def _lowercase ( self : List[Any] ) -> Union[str, Any]:
lowercase_ = None
lowercase_ = 1_0
lowercase_ = 2
# create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
lowercase_ = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]] ) )
lowercase_ = FlaxTopPLogitsWarper(0.8 )
lowercase_ = np.exp(top_p_warp(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , cur_len=SCREAMING_SNAKE_CASE_ ) )
# dist should be filtered to keep min num values so that sum is >= top_p
# exp (-inf) => 0
lowercase_ = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]] )
self.assertTrue(np.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=1e-3 ) )
# check edge cases with negative and extreme logits
lowercase_ = np.broadcast_to(np.arange(SCREAMING_SNAKE_CASE_ )[None, :] , (batch_size, vocab_size) ).copy() - (
vocab_size // 2
)
# make ramp_logits more extreme
lowercase_ = ramp_logits[1] * 1_00.0
# make sure at least 2 tokens are kept
lowercase_ = FlaxTopPLogitsWarper(0.9 , min_tokens_to_keep=2 , filter_value=0.0 )
lowercase_ = top_p_warp(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , cur_len=SCREAMING_SNAKE_CASE_ )
# first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
self.assertListEqual((filtered_dist != 0.0).sum(axis=-1 ).tolist() , [3, 2] )
def _lowercase ( self : Tuple ) -> Any:
lowercase_ = 2_0
lowercase_ = 4
lowercase_ = 0
lowercase_ = FlaxMinLengthLogitsProcessor(min_length=1_0 , eos_token_id=SCREAMING_SNAKE_CASE_ )
# check that min length is applied at length 5
lowercase_ = ids_tensor((batch_size, 2_0) , vocab_size=2_0 )
lowercase_ = 5
lowercase_ = self._get_uniform_logits(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowercase_ = min_dist_processor(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , cur_len=SCREAMING_SNAKE_CASE_ )
self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist() , 4 * [-float('''inf''' )] )
# check that min length is not applied anymore at length 15
lowercase_ = self._get_uniform_logits(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowercase_ = 1_5
lowercase_ = min_dist_processor(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , cur_len=SCREAMING_SNAKE_CASE_ )
self.assertFalse(jnp.isinf(SCREAMING_SNAKE_CASE_ ).any() )
def _lowercase ( self : List[str] ) -> Dict:
lowercase_ = 2_0
lowercase_ = 4
lowercase_ = 0
lowercase_ = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=SCREAMING_SNAKE_CASE_ )
# check that all scores are -inf except the bos_token_id score
lowercase_ = ids_tensor((batch_size, 1) , vocab_size=2_0 )
lowercase_ = 1
lowercase_ = self._get_uniform_logits(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowercase_ = logits_processor(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , cur_len=SCREAMING_SNAKE_CASE_ )
self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :] ).all() )
        self.assertListEqual(scores[:, bos_token_id].tolist() , 4 * [0] ) # score for bos_token_id should be zero
# check that bos_token_id is not forced if current length is greater than 1
lowercase_ = 3
lowercase_ = self._get_uniform_logits(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowercase_ = logits_processor(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , cur_len=SCREAMING_SNAKE_CASE_ )
self.assertFalse(jnp.isinf(SCREAMING_SNAKE_CASE_ ).any() )
def _lowercase ( self : int ) -> Optional[int]:
lowercase_ = 2_0
lowercase_ = 4
lowercase_ = 0
lowercase_ = 5
lowercase_ = FlaxForcedEOSTokenLogitsProcessor(max_length=SCREAMING_SNAKE_CASE_ , eos_token_id=SCREAMING_SNAKE_CASE_ )
# check that all scores are -inf except the eos_token_id when max_length is reached
lowercase_ = ids_tensor((batch_size, 4) , vocab_size=2_0 )
lowercase_ = 4
lowercase_ = self._get_uniform_logits(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowercase_ = logits_processor(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , cur_len=SCREAMING_SNAKE_CASE_ )
self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, eos_token_id].tolist() , 4 * [0] ) # score for eos_token_id should be zero
# check that eos_token_id is not forced if max_length is not reached
lowercase_ = 3
lowercase_ = self._get_uniform_logits(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowercase_ = logits_processor(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , cur_len=SCREAMING_SNAKE_CASE_ )
self.assertFalse(jnp.isinf(SCREAMING_SNAKE_CASE_ ).any() )
def _lowercase ( self : int ) -> Tuple:
lowercase_ = 4
lowercase_ = 1_0
lowercase_ = 1_5
lowercase_ = 2
lowercase_ = 1
lowercase_ = 1_5
# dummy input_ids and scores
lowercase_ = ids_tensor((batch_size, sequence_length) , SCREAMING_SNAKE_CASE_ )
lowercase_ = input_ids.copy()
lowercase_ = self._get_uniform_logits(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowercase_ = scores.copy()
# instantiate all dist processors
lowercase_ = FlaxTemperatureLogitsWarper(temperature=0.5 )
lowercase_ = FlaxTopKLogitsWarper(3 )
lowercase_ = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
lowercase_ = FlaxMinLengthLogitsProcessor(min_length=1_0 , eos_token_id=SCREAMING_SNAKE_CASE_ )
lowercase_ = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=SCREAMING_SNAKE_CASE_ )
lowercase_ = FlaxForcedEOSTokenLogitsProcessor(max_length=SCREAMING_SNAKE_CASE_ , eos_token_id=SCREAMING_SNAKE_CASE_ )
lowercase_ = 1_0
# no processor list
lowercase_ = temp_dist_warp(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , cur_len=SCREAMING_SNAKE_CASE_ )
lowercase_ = top_k_warp(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , cur_len=SCREAMING_SNAKE_CASE_ )
lowercase_ = top_p_warp(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , cur_len=SCREAMING_SNAKE_CASE_ )
lowercase_ = min_dist_proc(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , cur_len=SCREAMING_SNAKE_CASE_ )
lowercase_ = bos_dist_proc(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , cur_len=SCREAMING_SNAKE_CASE_ )
lowercase_ = eos_dist_proc(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , cur_len=SCREAMING_SNAKE_CASE_ )
# with processor list
lowercase_ = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
lowercase_ = processor(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , cur_len=SCREAMING_SNAKE_CASE_ )
# scores should be equal
self.assertTrue(jnp.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=1e-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
def _lowercase ( self : List[Any] ) -> List[str]:
lowercase_ = 4
lowercase_ = 1_0
lowercase_ = 1_5
lowercase_ = 2
lowercase_ = 1
lowercase_ = 1_5
# dummy input_ids and scores
lowercase_ = ids_tensor((batch_size, sequence_length) , SCREAMING_SNAKE_CASE_ )
lowercase_ = input_ids.copy()
lowercase_ = self._get_uniform_logits(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowercase_ = scores.copy()
# instantiate all dist processors
lowercase_ = FlaxTemperatureLogitsWarper(temperature=0.5 )
lowercase_ = FlaxTopKLogitsWarper(3 )
lowercase_ = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
lowercase_ = FlaxMinLengthLogitsProcessor(min_length=1_0 , eos_token_id=SCREAMING_SNAKE_CASE_ )
lowercase_ = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=SCREAMING_SNAKE_CASE_ )
lowercase_ = FlaxForcedEOSTokenLogitsProcessor(max_length=SCREAMING_SNAKE_CASE_ , eos_token_id=SCREAMING_SNAKE_CASE_ )
lowercase_ = 1_0
# no processor list
def run_no_processor_list(SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : int ):
lowercase_ = temp_dist_warp(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , cur_len=SCREAMING_SNAKE_CASE_ )
lowercase_ = top_k_warp(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , cur_len=SCREAMING_SNAKE_CASE_ )
lowercase_ = top_p_warp(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , cur_len=SCREAMING_SNAKE_CASE_ )
lowercase_ = min_dist_proc(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , cur_len=SCREAMING_SNAKE_CASE_ )
lowercase_ = bos_dist_proc(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , cur_len=SCREAMING_SNAKE_CASE_ )
lowercase_ = eos_dist_proc(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , cur_len=SCREAMING_SNAKE_CASE_ )
return scores
# with processor list
def run_processor_list(SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : List[Any] ):
lowercase_ = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
lowercase_ = processor(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , cur_len=SCREAMING_SNAKE_CASE_ )
return scores
lowercase_ = jax.jit(SCREAMING_SNAKE_CASE_ )
lowercase_ = jax.jit(SCREAMING_SNAKE_CASE_ )
lowercase_ = jitted_run_no_processor_list(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowercase_ = jitted_run_processor_list(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# scores should be equal
self.assertTrue(jnp.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=1e-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
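# A minimal, model-free sketch of how the warpers tested above compose at
# sampling time. Assumes these classes are importable from the top-level
# `transformers` namespace (true for recent releases); the logits here are
# hand-built rather than produced by a model.
import jax
import jax.numpy as jnp
from transformers import (
    FlaxLogitsProcessorList,
    FlaxTemperatureLogitsWarper,
    FlaxTopKLogitsWarper,
    FlaxTopPLogitsWarper,
)
dummy_input_ids = jnp.zeros((1, 4), dtype=jnp.int32)   # fake prompt, batch of 1
dummy_logits = jnp.array([[1.0, 2.0, 3.0, 4.0, 5.0]])  # fake next-token scores
sampling_warpers = FlaxLogitsProcessorList(
    [
        FlaxTemperatureLogitsWarper(temperature=0.7),  # sharpen the distribution
        FlaxTopKLogitsWarper(top_k=3),                 # keep only the 3 best tokens
        FlaxTopPLogitsWarper(top_p=0.9),               # then apply nucleus filtering
    ]
)
warped_logits = sampling_warpers(dummy_input_ids, dummy_logits, cur_len=4)
print(jax.nn.softmax(warped_logits, axis=-1))  # filtered tokens collapse to ~0.0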
| 97 |
from manim import *
class A__ ( __snake_case ):
'''simple docstring'''
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
UpperCamelCase = Rectangle(height=0.5 , width=0.5 )
UpperCamelCase = Rectangle(height=0.4_6 , width=0.4_6 ).set_stroke(width=0 )
UpperCamelCase = Rectangle(height=0.2_5 , width=0.2_5 )
UpperCamelCase = [mem.copy() for i in range(6 )]
UpperCamelCase = [mem.copy() for i in range(6 )]
UpperCamelCase = VGroup(*_SCREAMING_SNAKE_CASE ).arrange(_SCREAMING_SNAKE_CASE , buff=0 )
UpperCamelCase = VGroup(*_SCREAMING_SNAKE_CASE ).arrange(_SCREAMING_SNAKE_CASE , buff=0 )
UpperCamelCase = VGroup(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).arrange(_SCREAMING_SNAKE_CASE , buff=0 )
UpperCamelCase = Text('CPU' , font_size=24 )
UpperCamelCase = Group(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).arrange(_SCREAMING_SNAKE_CASE , buff=0.5 , aligned_edge=_SCREAMING_SNAKE_CASE )
cpu.move_to([-2.5, -0.5, 0] )
self.add(_SCREAMING_SNAKE_CASE )
UpperCamelCase = [mem.copy() for i in range(4 )]
UpperCamelCase = VGroup(*_SCREAMING_SNAKE_CASE ).arrange(_SCREAMING_SNAKE_CASE , buff=0 )
UpperCamelCase = Text('GPU' , font_size=24 )
UpperCamelCase = Group(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).arrange(_SCREAMING_SNAKE_CASE , buff=0.5 , aligned_edge=_SCREAMING_SNAKE_CASE )
gpu.move_to([-1, -1, 0] )
self.add(_SCREAMING_SNAKE_CASE )
UpperCamelCase = [mem.copy() for i in range(6 )]
UpperCamelCase = VGroup(*_SCREAMING_SNAKE_CASE ).arrange(_SCREAMING_SNAKE_CASE , buff=0 )
UpperCamelCase = Text('Model' , font_size=24 )
UpperCamelCase = Group(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).arrange(_SCREAMING_SNAKE_CASE , buff=0.5 , aligned_edge=_SCREAMING_SNAKE_CASE )
model.move_to([3, -1.0, 0] )
self.add(_SCREAMING_SNAKE_CASE )
UpperCamelCase = []
UpperCamelCase = []
for i, rect in enumerate(_SCREAMING_SNAKE_CASE ):
UpperCamelCase = fill.copy().set_fill(_SCREAMING_SNAKE_CASE , opacity=0.8 )
target.move_to(_SCREAMING_SNAKE_CASE )
model_arr.append(_SCREAMING_SNAKE_CASE )
UpperCamelCase = Rectangle(height=0.4_6 , width=0.4_6 ).set_stroke(width=0.0 ).set_fill(_SCREAMING_SNAKE_CASE , opacity=0.8 )
cpu_target.move_to(cpu_left_col_base[i] )
model_cpu_arr.append(_SCREAMING_SNAKE_CASE )
self.add(*_SCREAMING_SNAKE_CASE , *_SCREAMING_SNAKE_CASE )
UpperCamelCase = [meta_mem.copy() for i in range(6 )]
UpperCamelCase = [meta_mem.copy() for i in range(6 )]
UpperCamelCase = VGroup(*_SCREAMING_SNAKE_CASE ).arrange(_SCREAMING_SNAKE_CASE , buff=0 )
UpperCamelCase = VGroup(*_SCREAMING_SNAKE_CASE ).arrange(_SCREAMING_SNAKE_CASE , buff=0 )
UpperCamelCase = VGroup(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).arrange(_SCREAMING_SNAKE_CASE , buff=0 )
UpperCamelCase = Text('Disk' , font_size=24 )
UpperCamelCase = Group(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).arrange(_SCREAMING_SNAKE_CASE , buff=0.5 , aligned_edge=_SCREAMING_SNAKE_CASE )
disk.move_to([-4, -1.2_5, 0] )
self.add(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
UpperCamelCase = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
UpperCamelCase = MarkupText(
f'<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model' , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
UpperCamelCase = MarkupText(
f'<span fgcolor=\'{BLUE}\'>●</span> Checkpoint' , font_size=18 , )
blue_text.next_to(_SCREAMING_SNAKE_CASE , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(_SCREAMING_SNAKE_CASE )
UpperCamelCase = MarkupText(
f'Now watch as an input is passed through the model\nand how the memory is utilized and handled.' , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(_SCREAMING_SNAKE_CASE ) )
UpperCamelCase = Square(0.3 )
input.set_fill(_SCREAMING_SNAKE_CASE , opacity=1.0 )
input.set_stroke(width=0.0 )
input.next_to(model_base[0] , _SCREAMING_SNAKE_CASE , buff=0.5 )
self.play(Write(_SCREAMING_SNAKE_CASE ) )
input.generate_target()
input.target.next_to(model_arr[0] , direction=_SCREAMING_SNAKE_CASE , buff=0.0_2 )
self.play(MoveToTarget(_SCREAMING_SNAKE_CASE ) )
self.play(FadeOut(_SCREAMING_SNAKE_CASE ) )
UpperCamelCase = Arrow(start=_SCREAMING_SNAKE_CASE , end=_SCREAMING_SNAKE_CASE , color=_SCREAMING_SNAKE_CASE , buff=0.5 )
a.next_to(model_arr[0].get_left() , _SCREAMING_SNAKE_CASE , buff=0.2 )
model_cpu_arr[0].generate_target()
model_cpu_arr[0].target.move_to(gpu_rect[0] )
UpperCamelCase = MarkupText(
f'As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.' , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(_SCREAMING_SNAKE_CASE , run_time=3 ) )
UpperCamelCase = {'run_time': 1, 'fade_in': True, 'fade_out': True, 'buff': 0.0_2}
self.play(
Write(_SCREAMING_SNAKE_CASE ) , Circumscribe(model_arr[0] , color=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) , Circumscribe(model_cpu_arr[0] , color=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) , Circumscribe(gpu_rect[0] , color=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) , )
self.play(MoveToTarget(model_cpu_arr[0] ) )
UpperCamelCase = a.copy()
for i in range(6 ):
a_c.next_to(model_arr[i].get_right() + 0.0_2 , _SCREAMING_SNAKE_CASE , buff=0.2 )
input.generate_target()
input.target.move_to(model_arr[i].get_right() + 0.0_2 )
UpperCamelCase = AnimationGroup(
FadeOut(_SCREAMING_SNAKE_CASE , run_time=0.5 ) , MoveToTarget(_SCREAMING_SNAKE_CASE , run_time=0.5 ) , FadeIn(_SCREAMING_SNAKE_CASE , run_time=0.5 ) , lag_ratio=0.2 )
self.play(_SCREAMING_SNAKE_CASE )
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[i] )
if i < 5:
model_cpu_arr[i + 1].generate_target()
model_cpu_arr[i + 1].target.move_to(gpu_rect[0] )
if i >= 1:
UpperCamelCase = 0.7
self.play(
Circumscribe(model_arr[i] , **_SCREAMING_SNAKE_CASE ) , Circumscribe(cpu_left_col_base[i] , **_SCREAMING_SNAKE_CASE ) , Circumscribe(cpu_left_col_base[i + 1] , color=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) , Circumscribe(gpu_rect[0] , color=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) , Circumscribe(model_arr[i + 1] , color=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) , )
if i < 1:
self.play(
MoveToTarget(model_cpu_arr[i] ) , MoveToTarget(model_cpu_arr[i + 1] ) , )
else:
self.play(
MoveToTarget(model_cpu_arr[i] , run_time=0.7 ) , MoveToTarget(model_cpu_arr[i + 1] , run_time=0.7 ) , )
else:
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[-1] )
input.generate_target()
input.target.next_to(model_arr[-1].get_right() , RIGHT + 0.0_2 , buff=0.2 )
self.play(
Circumscribe(model_arr[-1] , color=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) , Circumscribe(cpu_left_col_base[-1] , color=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) , Circumscribe(gpu_rect[0] , color=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) , )
self.play(MoveToTarget(model_cpu_arr[i] ) )
UpperCamelCase = a_c
UpperCamelCase = a_c.copy()
input.generate_target()
input.target.next_to(model_base[-1] , RIGHT + 0.0_2 , buff=0.5 )
self.play(
FadeOut(_SCREAMING_SNAKE_CASE ) , FadeOut(_SCREAMING_SNAKE_CASE , run_time=0.5 ) , )
UpperCamelCase = MarkupText(f'Inference on a model too large for GPU memory\nis successfully completed.' , font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(_SCREAMING_SNAKE_CASE , run_time=3 ) , MoveToTarget(_SCREAMING_SNAKE_CASE ) )
self.wait()
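# The animation above depicts hook-driven offloading: each layer's weights are
# pulled onto the GPU just before its forward pass and pushed back to the CPU
# right after. Below is a toy PyTorch sketch of that idea only -- the real
# mechanism (e.g. accelerate's device hooks) also handles buffers, tied
# weights and disk offload -- and a CUDA device is assumed to be available.
import torch
from torch import nn
def attach_offload_hooks(layer: nn.Module, device: str = "cuda") -> None:
    def pre_hook(module, args):
        module.to(device)  # materialize this layer's weights on the GPU
        return tuple(a.to(device) for a in args)  # inputs must follow the weights
    def post_hook(module, args, output):
        module.to("cpu")  # evict the weights again once the layer has run
    layer.register_forward_pre_hook(pre_hook)
    layer.register_forward_hook(post_hook)
toy_model = nn.Sequential(*[nn.Linear(16, 16) for _ in range(6)]).cpu()
for toy_layer in toy_model:
    attach_offload_hooks(toy_layer)
# toy_model(torch.randn(1, 16))  # at most one layer sits in GPU memory at a time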
| 280 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase = logging.get_logger(__name__)
lowerCamelCase = {
"""EleutherAI/gpt-neox-20b""": """https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json""",
# See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}
class lowercase__ ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
UpperCamelCase = '''gpt_neox'''
def __init__( self : Optional[Any] , _UpperCAmelCase : List[str]=50432 , _UpperCAmelCase : Any=6144 , _UpperCAmelCase : Tuple=44 , _UpperCAmelCase : Tuple=64 , _UpperCAmelCase : Dict=24576 , _UpperCAmelCase : List[str]="gelu" , _UpperCAmelCase : Tuple=0.25 , _UpperCAmelCase : Union[str, Any]=10000 , _UpperCAmelCase : List[str]=0.0 , _UpperCAmelCase : List[str]=0.0 , _UpperCAmelCase : str=0.1 , _UpperCAmelCase : Optional[Any]=2048 , _UpperCAmelCase : int=0.02 , _UpperCAmelCase : Any=1e-5 , _UpperCAmelCase : Optional[int]=True , _UpperCAmelCase : Dict=0 , _UpperCAmelCase : List[str]=2 , _UpperCAmelCase : Dict=False , _UpperCAmelCase : Optional[Any]=True , _UpperCAmelCase : Optional[Any]=None , **_UpperCAmelCase : Dict , ) -> Optional[Any]:
'''simple docstring'''
super().__init__(bos_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase , **_UpperCAmelCase )
UpperCAmelCase_ = vocab_size
UpperCAmelCase_ = max_position_embeddings
UpperCAmelCase_ = hidden_size
UpperCAmelCase_ = num_hidden_layers
UpperCAmelCase_ = num_attention_heads
UpperCAmelCase_ = intermediate_size
UpperCAmelCase_ = hidden_act
UpperCAmelCase_ = rotary_pct
UpperCAmelCase_ = rotary_emb_base
UpperCAmelCase_ = attention_dropout
UpperCAmelCase_ = hidden_dropout
UpperCAmelCase_ = classifier_dropout
UpperCAmelCase_ = initializer_range
UpperCAmelCase_ = layer_norm_eps
UpperCAmelCase_ = use_cache
UpperCAmelCase_ = tie_word_embeddings
UpperCAmelCase_ = use_parallel_residual
UpperCAmelCase_ = rope_scaling
self._rope_scaling_validation()
if self.hidden_size % self.num_attention_heads != 0:
raise ValueError(
"The hidden size is not divisble by the number of attention heads! Make sure to update them!" )
def lowercase__ ( self : Optional[Any] ) -> Dict:
'''simple docstring'''
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling , _UpperCAmelCase ) or len(self.rope_scaling ) != 2:
raise ValueError(
"`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, "
F"""got {self.rope_scaling}""" )
UpperCAmelCase_ = self.rope_scaling.get("type" , _UpperCAmelCase )
UpperCAmelCase_ = self.rope_scaling.get("factor" , _UpperCAmelCase )
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
F"""`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}""" )
if rope_scaling_factor is None or not isinstance(_UpperCAmelCase , _UpperCAmelCase ) or rope_scaling_factor <= 1.0:
raise ValueError(F"""`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}""" )
| 717 |
"""simple docstring"""
from __future__ import annotations
from decimal import Decimal
from numpy import array
def a__ ( lowerCAmelCase__ ):
UpperCAmelCase_ = Decimal
# Check if the provided matrix has 2 rows and 2 columns
# since this implementation only works for 2x2 matrices
if len(lowerCAmelCase__ ) == 2 and len(matrix[0] ) == 2 and len(matrix[1] ) == 2:
# Calculate the determinant of the matrix
UpperCAmelCase_ = float(
d(matrix[0][0] ) * d(matrix[1][1] ) - d(matrix[1][0] ) * d(matrix[0][1] ) )
if determinant == 0:
raise ValueError("This matrix has no inverse." )
# Creates a copy of the matrix with swapped positions of the elements
UpperCAmelCase_ = [[0.0, 0.0], [0.0, 0.0]]
UpperCAmelCase_ , UpperCAmelCase_ = matrix[1][1], matrix[0][0]
UpperCAmelCase_ , UpperCAmelCase_ = -matrix[1][0], -matrix[0][1]
# Calculate the inverse of the matrix
return [
[(float(d(lowerCAmelCase__ ) ) / determinant) or 0.0 for n in row] for row in swapped_matrix
]
elif (
len(lowerCAmelCase__ ) == 3
and len(matrix[0] ) == 3
and len(matrix[1] ) == 3
and len(matrix[2] ) == 3
):
# Calculate the determinant of the matrix using Sarrus rule
UpperCAmelCase_ = float(
(
(d(matrix[0][0] ) * d(matrix[1][1] ) * d(matrix[2][2] ))
+ (d(matrix[0][1] ) * d(matrix[1][2] ) * d(matrix[2][0] ))
+ (d(matrix[0][2] ) * d(matrix[1][0] ) * d(matrix[2][1] ))
)
- (
(d(matrix[0][2] ) * d(matrix[1][1] ) * d(matrix[2][0] ))
+ (d(matrix[0][1] ) * d(matrix[1][0] ) * d(matrix[2][2] ))
+ (d(matrix[0][0] ) * d(matrix[1][2] ) * d(matrix[2][1] ))
) )
if determinant == 0:
raise ValueError("This matrix has no inverse." )
# Creating cofactor matrix
UpperCAmelCase_ = [
[d(0.0 ), d(0.0 ), d(0.0 )],
[d(0.0 ), d(0.0 ), d(0.0 )],
[d(0.0 ), d(0.0 ), d(0.0 )],
]
UpperCAmelCase_ = (d(matrix[1][1] ) * d(matrix[2][2] )) - (
d(matrix[1][2] ) * d(matrix[2][1] )
)
UpperCAmelCase_ = -(
(d(matrix[1][0] ) * d(matrix[2][2] )) - (d(matrix[1][2] ) * d(matrix[2][0] ))
)
UpperCAmelCase_ = (d(matrix[1][0] ) * d(matrix[2][1] )) - (
d(matrix[1][1] ) * d(matrix[2][0] )
)
UpperCAmelCase_ = -(
(d(matrix[0][1] ) * d(matrix[2][2] )) - (d(matrix[0][2] ) * d(matrix[2][1] ))
)
UpperCAmelCase_ = (d(matrix[0][0] ) * d(matrix[2][2] )) - (
d(matrix[0][2] ) * d(matrix[2][0] )
)
UpperCAmelCase_ = -(
(d(matrix[0][0] ) * d(matrix[2][1] )) - (d(matrix[0][1] ) * d(matrix[2][0] ))
)
UpperCAmelCase_ = (d(matrix[0][1] ) * d(matrix[1][2] )) - (
d(matrix[0][2] ) * d(matrix[1][1] )
)
UpperCAmelCase_ = -(
(d(matrix[0][0] ) * d(matrix[1][2] )) - (d(matrix[0][2] ) * d(matrix[1][0] ))
)
UpperCAmelCase_ = (d(matrix[0][0] ) * d(matrix[1][1] )) - (
d(matrix[0][1] ) * d(matrix[1][0] )
)
        # Transpose the cofactor matrix (adjoint matrix)
        adjoint_matrix = array(cofactor_matrix )
        for i in range(3 ):
            for j in range(3 ):
                adjoint_matrix[i][j] = cofactor_matrix[j][i]
        # Inverse of the matrix using the formula (1/determinant) * adjoint matrix
        inverse_matrix = array(adjoint_matrix )
        for i in range(3 ):
            for j in range(3 ):
                inverse_matrix[i][j] /= d(determinant )
        # Calculate the inverse of the matrix
        return [[float(d(n ) ) or 0.0 for n in row] for row in inverse_matrix]
raise ValueError("Please provide a matrix of size 2x2 or 3x3." )
| 14 | 0 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..utils import cached_file
# docstyle-ignore
lowercase__ = "\nHuman: <<task>>\n\nAssistant: "
lowercase__ = "huggingface-tools/default-prompts"
lowercase__ = {"chat": "chat_prompt_template.txt", "run": "run_prompt_template.txt"}
def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__="run" ) -> Dict:
'''simple docstring'''
if prompt_or_repo_id is None:
snake_case : Union[str, Any] = DEFAULT_PROMPTS_REPO
# prompt is considered a repo ID when it does not contain any kind of space
if re.search('''\\s''' , SCREAMING_SNAKE_CASE__ ) is not None:
return prompt_or_repo_id
snake_case : List[Any] = cached_file(
SCREAMING_SNAKE_CASE__ , PROMPT_FILES[mode] , repo_type='''dataset''' , user_agent={'''agent''': agent_name} )
with open(SCREAMING_SNAKE_CASE__ , '''r''' , encoding='''utf-8''' ) as f:
return f.read()
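# Hedged usage sketch: in recent transformers releases this helper is exposed
# as `download_prompt` (transformers.tools.prompts; moved under
# transformers.agents in later versions -- the import path is an assumption).
from transformers.tools.prompts import download_prompt
literal = download_prompt("Human: <<task>>\n\nAssistant: ", agent_name="my_agent")
# Contains whitespace -> treated as a literal prompt and returned unchanged.
# A space-free string is treated as a Hub dataset repo id instead; this would
# fetch and return chat_prompt_template.txt (network access required):
#   template = download_prompt("huggingface-tools/default-prompts", "my_agent", mode="chat")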
| 638 |
'''simple docstring'''
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import MaMaaaTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from transformers.utils import is_sentencepiece_available
if is_sentencepiece_available():
from transformers.models.mam_aaa.tokenization_mam_aaa import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
if is_sentencepiece_available():
lowercase__ = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right
lowercase__ = 1_2_8_0_2_2
lowercase__ = 1_2_8_0_2_8
@require_sentencepiece
class snake_case__ ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase = MaMaaaTokenizer
lowerCamelCase = False
lowerCamelCase = False
lowerCamelCase = True
def lowerCAmelCase ( self : Dict ) -> Optional[Any]:
"""simple docstring"""
super().setUp()
snake_case : int = ['''</s>''', '''<unk>''', '''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est''', '''\u0120''', '''<pad>''']
snake_case : int = dict(zip(UpperCamelCase__ , range(len(UpperCamelCase__ ) ) ) )
snake_case : Optional[int] = Path(self.tmpdirname )
save_json(UpperCamelCase__ , save_dir / VOCAB_FILES_NAMES['''vocab_file'''] )
if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
copyfile(UpperCamelCase__ , save_dir / VOCAB_FILES_NAMES['''spm_file'''] )
snake_case : Optional[int] = MaMaaaTokenizer.from_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname )
def lowerCAmelCase ( self : Dict , **UpperCamelCase__ : Optional[int] ) -> Any:
"""simple docstring"""
return MaMaaaTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase__ )
def lowerCAmelCase ( self : Tuple , UpperCamelCase__ : Tuple ) -> str:
"""simple docstring"""
return (
"This is a test",
"This is a test",
)
def lowerCAmelCase ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
snake_case : Optional[int] = '''</s>'''
snake_case : str = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCamelCase__ ) , UpperCamelCase__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCamelCase__ ) , UpperCamelCase__ )
def lowerCAmelCase ( self : Dict ) -> int:
"""simple docstring"""
snake_case : Tuple = self.get_tokenizer()
snake_case : Dict = list(tokenizer.get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''</s>''' )
self.assertEqual(vocab_keys[1] , '''<unk>''' )
self.assertEqual(vocab_keys[-1] , '''<s>''' )
self.assertEqual(len(UpperCamelCase__ ) , tokenizer.vocab_size + len(tokenizer.get_added_vocab() ) )
@unittest.skip('''Skip this test while all models are still to be uploaded.''' )
def lowerCAmelCase ( self : str ) -> List[str]:
"""simple docstring"""
pass
def lowerCAmelCase ( self : int ) -> Union[str, Any]:
"""simple docstring"""
snake_case : Optional[Any] = self.get_tokenizer()
snake_case : Optional[Any] = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(UpperCamelCase__ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(UpperCamelCase__ ) , [2, 3, 4, 5, 6] , )
snake_case : Any = tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6] )
self.assertListEqual(UpperCamelCase__ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
snake_case : Optional[int] = tokenizer.convert_tokens_to_string(UpperCamelCase__ )
self.assertEqual(UpperCamelCase__ , '''This is a test''' )
@slow
def lowerCAmelCase ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
snake_case : Tuple = {'''input_ids''': [[12_8022, 11_0108, 397, 11, 3_8272, 2247, 12_4811, 285, 1_8105, 1586, 207, 7, 3_9534, 4428, 397, 1019, 1_8105, 1586, 207, 7, 4_1337, 1_6786, 241, 7, 2_0214, 17, 12_5690, 1_0398, 7, 4_4378, 5_8069, 6_8342, 7798, 7343, 11, 299, 3_3310, 4, 158, 3_7350, 9_4077, 4569, 299, 3_3310, 90, 4, 5_2840, 290, 4, 3_1270, 112, 299, 682, 4, 5_2840, 3_9953, 1_4079, 193, 5_2519, 9_0894, 1_7894, 12_0697, 11, 4_0445, 551, 17, 1019, 5_2519, 9_0894, 1_7756, 963, 11, 4_0445, 480, 17, 9792, 1120, 5173, 1393, 6240, 1_6786, 241, 12_0996, 28, 1245, 1393, 11_8240, 1_1123, 1019, 9_3612, 2691, 1_0618, 9_8058, 12_0409, 1928, 279, 4, 4_0683, 367, 178, 207, 1019, 103, 10_3121, 506, 6_5296, 5, 2], [12_8022, 2_1217, 367, 117, 12_5450, 128, 719, 7, 7308, 40, 9_3612, 1_2669, 1116, 1_6704, 71, 1_7785, 3699, 1_5592, 35, 144, 9584, 241, 1_1943, 713, 950, 799, 2247, 8_8427, 150, 149, 11_8813, 12_0706, 1019, 10_6906, 8_1518, 28, 1224, 2_2799, 397, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [12_8022, 1658, 12_3311, 5155, 5578, 4722, 279, 1_4947, 2366, 1120, 1197, 14, 1348, 9232, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=UpperCamelCase__ , model_name='''facebook/m2m100_418M''' , revision='''c168bae485c864188cf9aa0e4108b0b6934dc91e''' , )
@require_torch
@require_sentencepiece
@require_tokenizers
class snake_case__ ( unittest.TestCase ):
"""simple docstring"""
lowerCamelCase = """facebook/m2m100_418M"""
lowerCamelCase = [
"""In my opinion, there are two levels of response from the French government.""",
"""NSA Affair Emphasizes Complete Lack of Debate on Intelligence""",
]
lowerCamelCase = [
"""Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.""",
"""L'affaire NSA souligne l'absence totale de débat sur le renseignement""",
]
# fmt: off
lowerCamelCase = [EN_CODE, 593, 1949, 115781, 4, 71586, 4234, 60633, 126233, 432, 123808, 15592, 1197, 117132, 120618, 5, 2]
@classmethod
def lowerCAmelCase ( cls : List[Any] ) -> int:
"""simple docstring"""
snake_case : MaMaaaTokenizer = MaMaaaTokenizer.from_pretrained(
cls.checkpoint_name , src_lang='''en''' , tgt_lang='''fr''' )
snake_case : List[str] = 1
return cls
def lowerCAmelCase ( self : Union[str, Any] ) -> str:
"""simple docstring"""
self.assertEqual(self.tokenizer.get_lang_id('''ar''' ) , 12_8006 )
self.assertEqual(self.tokenizer.get_lang_id('''en''' ) , 12_8022 )
self.assertEqual(self.tokenizer.get_lang_id('''ro''' ) , 12_8076 )
self.assertEqual(self.tokenizer.get_lang_id('''mr''' ) , 12_8063 )
def lowerCAmelCase ( self : Any ) -> Dict:
"""simple docstring"""
snake_case : List[str] = self.tokenizer.get_vocab()
self.assertEqual(len(UpperCamelCase__ ) , self.tokenizer.vocab_size )
self.assertEqual(vocab['''<unk>'''] , 3 )
self.assertIn(self.tokenizer.get_lang_token('''en''' ) , UpperCamelCase__ )
def lowerCAmelCase ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
snake_case : Dict = '''en'''
snake_case : Dict = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , UpperCamelCase__ )
def lowerCAmelCase ( self : List[str] ) -> List[Any]:
"""simple docstring"""
self.assertIn(UpperCamelCase__ , self.tokenizer.all_special_ids )
# fmt: off
snake_case : str = [FR_CODE, 5364, 82, 8642, 4, 294, 47, 8, 1_4028, 136, 3286, 9706, 6, 9_0797, 6, 14_4012, 162, 8_8128, 3_0061, 5, 2]
# fmt: on
snake_case : int = self.tokenizer.decode(UpperCamelCase__ , skip_special_tokens=UpperCamelCase__ )
snake_case : List[Any] = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=UpperCamelCase__ )
self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
self.assertNotIn(self.tokenizer.eos_token , UpperCamelCase__ )
def lowerCAmelCase ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
snake_case : Union[str, Any] = tempfile.mkdtemp()
snake_case : Union[str, Any] = self.tokenizer.lang_token_to_id
self.tokenizer.save_pretrained(UpperCamelCase__ )
snake_case : Optional[int] = MaMaaaTokenizer.from_pretrained(UpperCamelCase__ )
self.assertDictEqual(new_tok.lang_token_to_id , UpperCamelCase__ )
@require_torch
def lowerCAmelCase ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
snake_case : Optional[Any] = '''en'''
snake_case : int = '''fr'''
snake_case : Dict = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=UpperCamelCase__ , return_tensors='''pt''' )
snake_case : List[str] = shift_tokens_right(
batch['''labels'''] , self.tokenizer.pad_token_id , self.tokenizer.eos_token_id )
for k in batch:
snake_case : Tuple = batch[k].tolist()
# batch = {k: v.tolist() for k,v in batch.items()}
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
# batch.decoder_inputs_ids[0][0] ==
assert batch.input_ids[1][0] == EN_CODE
assert batch.input_ids[1][-1] == 2
assert batch.labels[1][0] == FR_CODE
assert batch.labels[1][-1] == 2
assert batch.decoder_input_ids[1][:2] == [2, FR_CODE]
@require_torch
def lowerCAmelCase ( self : int ) -> Union[str, Any]:
"""simple docstring"""
snake_case : List[str] = '''mr'''
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('''mr''' )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
snake_case : Optional[int] = '''zh'''
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('''zh''' )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
@require_torch
def lowerCAmelCase ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
snake_case : List[Any] = '''mr'''
self.tokenizer._switch_to_target_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('''mr''' )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
self.tokenizer._switch_to_input_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] )
snake_case : List[str] = '''zh'''
self.tokenizer._switch_to_target_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('''zh''' )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
self.tokenizer._switch_to_input_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] )
@require_torch
def lowerCAmelCase ( self : Tuple ) -> str:
"""simple docstring"""
snake_case : int = self.tokenizer._build_translation_inputs('''A test''' , return_tensors='''pt''' , src_lang='''en''' , tgt_lang='''ar''' )
self.assertEqual(
nested_simplify(UpperCamelCase__ ) , {
# en_XX, A, test, EOS
'''input_ids''': [[12_8022, 58, 4183, 2]],
'''attention_mask''': [[1, 1, 1, 1]],
# ar_AR
'''forced_bos_token_id''': 12_8006,
} , )
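# The workflow these tests cover, end to end ("MaMaaa" is the mangled spelling
# of M2M100). Downloads the real checkpoint, so Hub access is assumed.
from transformers import M2M100ForConditionalGeneration, M2M100Tokenizer
tokenizer = M2M100Tokenizer.from_pretrained(
    "facebook/m2m100_418M", src_lang="en", tgt_lang="fr"
)
model = M2M100ForConditionalGeneration.from_pretrained("facebook/m2m100_418M")
batch = tokenizer("Life is like a box of chocolates.", return_tensors="pt")
generated = model.generate(**batch, forced_bos_token_id=tokenizer.get_lang_id("fr"))
print(tokenizer.batch_decode(generated, skip_special_tokens=True))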
| 638 | 1 |
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
lowerCAmelCase_ = Lock()
def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Optional[int]:
global process_lock
# we perform n swaps since after n swaps we know we are sorted
# we *could* stop early if we are sorted already, but it takes as long to
# find out we are sorted as it does to sort the list with this algorithm
for i in range(0 , 10 ):
if (i + position) % 2 == 0 and r_send is not None:
# send your value to your right neighbor
process_lock.acquire()
r_send[1].send(snake_case_ )
process_lock.release()
# receive your right neighbor's value
process_lock.acquire()
lowerCAmelCase__ : Dict = rr_cv[0].recv()
process_lock.release()
# take the lower value since you are on the left
lowerCAmelCase__ : int = min(snake_case_ , snake_case_ )
elif (i + position) % 2 != 0 and l_send is not None:
# send your value to your left neighbor
process_lock.acquire()
l_send[1].send(snake_case_ )
process_lock.release()
# receive your left neighbor's value
process_lock.acquire()
lowerCAmelCase__ : List[Any] = lr_cv[0].recv()
process_lock.release()
# take the higher value since you are on the right
lowerCAmelCase__ : List[Any] = max(snake_case_ , snake_case_ )
# after all swaps are performed, send the values back to main
result_pipe[1].send(snake_case_ )
def __lowerCAmelCase ( UpperCamelCase ) -> Dict:
lowerCAmelCase__ : Union[str, Any] = []
lowerCAmelCase__ : List[str] = []
# initialize the list of pipes where the values will be retrieved
for _ in arr:
result_pipe.append(Pipe() )
# creates the processes
# the first and last process only have one neighbor so they are made outside
# of the loop
lowerCAmelCase__ : List[Any] = Pipe()
lowerCAmelCase__ : Any = Pipe()
process_array_.append(
Process(
target=snake_case_ , args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]) , ) )
lowerCAmelCase__ : Tuple = temp_rs
lowerCAmelCase__ : str = temp_rr
for i in range(1 , len(snake_case_ ) - 1 ):
lowerCAmelCase__ : Union[str, Any] = Pipe()
lowerCAmelCase__ : Union[str, Any] = Pipe()
process_array_.append(
Process(
target=snake_case_ , args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]) , ) )
lowerCAmelCase__ : int = temp_rs
lowerCAmelCase__ : Union[str, Any] = temp_rr
process_array_.append(
Process(
target=snake_case_ , args=(
len(snake_case_ ) - 1,
arr[len(snake_case_ ) - 1],
temp_ls,
None,
temp_lr,
None,
result_pipe[len(snake_case_ ) - 1],
) , ) )
# start the processes
for p in process_array_:
p.start()
# wait for the processes to end and write their values to the list
for p in range(0 , len(snake_case_ ) ):
lowerCAmelCase__ : Optional[int] = result_pipe[p][0].recv()
process_array_[p].join()
return arr
def __lowerCAmelCase ( ) -> Optional[Any]:
lowerCAmelCase__ : int = list(range(10 , 0 , -1 ) )
print('''Initial List''' )
print(*snake_case_ )
lowerCAmelCase__ : Tuple = odd_even_transposition(snake_case_ )
print('''Sorted List\n''' )
print(*snake_case_ )
if __name__ == "__main__":
main()
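# For contrast, the same algorithm without processes, locks or pipes: a plain
# single-threaded odd-even transposition sort. As with the fixed swap count
# above, n phases over a list of length n guarantee a sorted result.
def odd_even_transposition_sequential(arr: list) -> list:
    n = len(arr)
    for phase in range(n):
        # even phases compare pairs (0,1), (2,3), ...; odd phases (1,2), (3,4), ...
        for i in range(phase % 2, n - 1, 2):
            if arr[i] > arr[i + 1]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
    return arr
print(odd_even_transposition_sequential(list(range(10, 0, -1))))  # [1, 2, ..., 10]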
| 710 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCAmelCase_ = {
"""configuration_lilt""": ["""LILT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LiltConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
"""LILT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""LiltForQuestionAnswering""",
"""LiltForSequenceClassification""",
"""LiltForTokenClassification""",
"""LiltModel""",
"""LiltPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_lilt import LILT_PRETRAINED_CONFIG_ARCHIVE_MAP, LiltConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lilt import (
LILT_PRETRAINED_MODEL_ARCHIVE_LIST,
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
LiltPreTrainedModel,
)
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
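# The essence of the lazy-import pattern used above, reduced to a sketch (an
# illustration of the idea, not the actual _LazyModule implementation):
# attribute access triggers the real submodule import on first use.
import importlib
import types
class TinyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported symbol to the submodule that defines it
        self._symbol_to_submodule = {
            symbol: submodule
            for submodule, symbols in import_structure.items()
            for symbol in symbols
        }
    def __getattr__(self, symbol):
        submodule = self._symbol_to_submodule[symbol]
        module = importlib.import_module(f".{submodule}", self.__name__)
        return getattr(module, symbol)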
| 470 | 0 |
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch
from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch
@require_torch
@require_tf
class _snake_case ( snake_case ):
def SCREAMING_SNAKE_CASE ( self ):
__magic_name__ : Optional[Any] = SMALL_MODEL_IDENTIFIER
__magic_name__ : Dict = "pt"
__magic_name__ : int = "tf"
def SCREAMING_SNAKE_CASE ( self , _a ):
__magic_name__ : int = AutoModel.from_pretrained(self.test_model )
model_pt.save_pretrained(_a )
def SCREAMING_SNAKE_CASE ( self , _a ):
__magic_name__ : List[str] = TFAutoModel.from_pretrained(self.test_model , from_pt=_a )
model_tf.save_pretrained(_a )
def SCREAMING_SNAKE_CASE ( self ):
__magic_name__ : List[Any] = "mock_framework"
# Framework provided - return whatever the user provides
__magic_name__ : Any = FeaturesManager.determine_framework(self.test_model , _a )
self.assertEqual(_a , _a )
# Local checkpoint and framework provided - return provided framework
# PyTorch checkpoint
with TemporaryDirectory() as local_pt_ckpt:
self._setup_pt_ckpt(_a )
__magic_name__ : Dict = FeaturesManager.determine_framework(_a , _a )
self.assertEqual(_a , _a )
# TensorFlow checkpoint
with TemporaryDirectory() as local_tf_ckpt:
self._setup_tf_ckpt(_a )
__magic_name__ : Union[str, Any] = FeaturesManager.determine_framework(_a , _a )
self.assertEqual(_a , _a )
def SCREAMING_SNAKE_CASE ( self ):
# PyTorch checkpoint
with TemporaryDirectory() as local_pt_ckpt:
self._setup_pt_ckpt(_a )
__magic_name__ : Dict = FeaturesManager.determine_framework(_a )
self.assertEqual(_a , self.framework_pt )
# TensorFlow checkpoint
with TemporaryDirectory() as local_tf_ckpt:
self._setup_tf_ckpt(_a )
__magic_name__ : List[Any] = FeaturesManager.determine_framework(_a )
self.assertEqual(_a , self.framework_tf )
# Invalid local checkpoint
with TemporaryDirectory() as local_invalid_ckpt:
with self.assertRaises(_a ):
__magic_name__ : Optional[int] = FeaturesManager.determine_framework(_a )
def SCREAMING_SNAKE_CASE ( self ):
__magic_name__ : Optional[Any] = MagicMock(return_value=_a )
with patch("transformers.onnx.features.is_tf_available" , _a ):
__magic_name__ : Optional[Any] = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(_a , self.framework_pt )
# PyTorch not in environment -> use TensorFlow
__magic_name__ : List[Any] = MagicMock(return_value=_a )
with patch("transformers.onnx.features.is_torch_available" , _a ):
__magic_name__ : Optional[Any] = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(_a , self.framework_tf )
# Both in environment -> use PyTorch
__magic_name__ : Dict = MagicMock(return_value=_a )
__magic_name__ : Any = MagicMock(return_value=_a )
with patch("transformers.onnx.features.is_tf_available" , _a ), patch(
"transformers.onnx.features.is_torch_available" , _a ):
__magic_name__ : Tuple = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(_a , self.framework_pt )
# Both not in environment -> raise error
__magic_name__ : Optional[Any] = MagicMock(return_value=_a )
__magic_name__ : List[str] = MagicMock(return_value=_a )
with patch("transformers.onnx.features.is_tf_available" , _a ), patch(
"transformers.onnx.features.is_torch_available" , _a ):
with self.assertRaises(_a ):
__magic_name__ : List[str] = FeaturesManager.determine_framework(self.test_model )
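# Hedged usage of the API under test: `FeaturesManager.determine_framework`
# resolves which backend an ONNX export should load the model from. The
# checkpoint id below is a placeholder, not a real repo.
from transformers.onnx import FeaturesManager
framework = FeaturesManager.determine_framework("path/or/hub-id")  # placeholder
# -> "pt" for a PyTorch checkpoint (or a torch-only environment), "tf" for a
#    TensorFlow one; passing an explicit second argument skips detection:
# framework = FeaturesManager.determine_framework("path/or/hub-id", "pt")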
| 124 |
import os
import sys
import unittest
snake_case : List[Any] = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
snake_case : str = os.path.join(git_repo_path, "src", "diffusers")
class _snake_case ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE ( self ):
__magic_name__ : Optional[Any] = find_backend(" if not is_torch_available():" )
self.assertEqual(_a , "torch" )
# backend_with_underscore = find_backend(" if not is_tensorflow_text_available():")
# self.assertEqual(backend_with_underscore, "tensorflow_text")
__magic_name__ : Optional[int] = find_backend(" if not (is_torch_available() and is_transformers_available()):" )
self.assertEqual(_a , "torch_and_transformers" )
# double_backend_with_underscore = find_backend(
# " if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
# )
# self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")
__magic_name__ : Optional[int] = find_backend(
" if not (is_torch_available() and is_transformers_available() and is_onnx_available()):" )
self.assertEqual(_a , "torch_and_transformers_and_onnx" )
def SCREAMING_SNAKE_CASE ( self ):
__magic_name__ : Optional[Any] = read_init()
# We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
self.assertIn("torch" , _a )
self.assertIn("torch_and_transformers" , _a )
self.assertIn("flax_and_transformers" , _a )
self.assertIn("torch_and_transformers_and_onnx" , _a )
# Likewise, we can't assert on the exact content of a key
self.assertIn("UNet2DModel" , objects["torch"] )
self.assertIn("FlaxUNet2DConditionModel" , objects["flax"] )
self.assertIn("StableDiffusionPipeline" , objects["torch_and_transformers"] )
self.assertIn("FlaxStableDiffusionPipeline" , objects["flax_and_transformers"] )
self.assertIn("LMSDiscreteScheduler" , objects["torch_and_scipy"] )
self.assertIn("OnnxStableDiffusionPipeline" , objects["torch_and_transformers_and_onnx"] )
def SCREAMING_SNAKE_CASE ( self ):
__magic_name__ : Any = create_dummy_object("CONSTANT" , "'torch'" )
self.assertEqual(_a , "\nCONSTANT = None\n" )
__magic_name__ : str = create_dummy_object("function" , "'torch'" )
self.assertEqual(
_a , "\ndef function(*args, **kwargs):\n requires_backends(function, 'torch')\n" )
__magic_name__ : Tuple = "\nclass FakeClass(metaclass=DummyObject):\n _backends = 'torch'\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, 'torch')\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, 'torch')\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, 'torch')\n"
__magic_name__ : int = create_dummy_object("FakeClass" , "'torch'" )
self.assertEqual(_a , _a )
def SCREAMING_SNAKE_CASE ( self ):
__magic_name__ : Optional[int] = "# This file is autogenerated by the command `make fix-copies`, do not edit.\nfrom ..utils import DummyObject, requires_backends\n\n\nCONSTANT = None\n\n\ndef function(*args, **kwargs):\n requires_backends(function, [\"torch\"])\n\n\nclass FakeClass(metaclass=DummyObject):\n _backends = [\"torch\"]\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, [\"torch\"])\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, [\"torch\"])\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, [\"torch\"])\n"
__magic_name__ : Tuple = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]} )
self.assertEqual(dummy_files["torch"] , _a )
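# What a generated dummy behaves like at runtime, reduced to a sketch (the
# real version routes through a requires_backends helper instead of raising
# ImportError directly).
class DummyObject(type):
    """Metaclass: attribute access on the class itself raises a helpful error."""
    def __getattr__(cls, key):
        raise ImportError(f"{cls.__name__} requires the {cls._backends} backend(s).")
class FakeClass(metaclass=DummyObject):
    _backends = ["torch"]
    def __init__(self, *args, **kwargs):
        raise ImportError(f"FakeClass requires the {self._backends} backend(s).")
# FakeClass()               -> ImportError via __init__
# FakeClass.from_pretrained -> ImportError via the metaclass __getattr__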
| 124 | 1 |
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {
'''vocab_file''': '''vocab.json''',
'''merges_file''': '''merges.txt''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
}
__magic_name__ = {
'''vocab_file''': {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json'''
},
'''merges_file''': {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt'''
},
'''tokenizer_config_file''': {
'''facebook/blenderbot_small-90M''': (
'''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json'''
)
},
}
__magic_name__ = {
'''facebook/blenderbot_small-90M''': 512,
}
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
__UpperCAmelCase = VOCAB_FILES_NAMES
__UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP
__UpperCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCAmelCase = BlenderbotSmallTokenizer
def __init__( self , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase="<|endoftext|>" , _UpperCAmelCase="<|endoftext|>" , _UpperCAmelCase="<|endoftext|>" , _UpperCAmelCase=False , _UpperCAmelCase=True , **_UpperCAmelCase , ):
super().__init__(
ByteLevelBPETokenizer(
vocab=_UpperCAmelCase , merges=_UpperCAmelCase , add_prefix_space=_UpperCAmelCase , trim_offsets=_UpperCAmelCase , ) , bos_token=_UpperCAmelCase , eos_token=_UpperCAmelCase , unk_token=_UpperCAmelCase , **_UpperCAmelCase , )
__snake_case : List[Any] = add_prefix_space
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase=None ):
__snake_case : str = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase = None ):
__snake_case : Union[str, Any] = [self.sep_token_id]
__snake_case : Any = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
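# Hedged usage sketch for the fast tokenizer above (Hub access assumed; the
# checkpoint is the one named in the pretrained maps).
from transformers import BlenderbotSmallTokenizerFast
tok = BlenderbotSmallTokenizerFast.from_pretrained("facebook/blenderbot_small-90M")
ids = tok("sam is a great name. it means listener.").input_ids
print(tok.decode(ids))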
| 679 |
from __future__ import annotations
__magic_name__ = [
[-1, 0], # left
[0, -1], # down
[1, 0], # right
[0, 1], # up
]
def UpperCAmelCase__( __UpperCAmelCase : list[list[int]] , __UpperCAmelCase : list[int] , __UpperCAmelCase : list[int] , __UpperCAmelCase : int , __UpperCAmelCase : list[list[int]] , ):
__snake_case : Optional[int] = [
[0 for col in range(len(grid[0] ) )] for row in range(len(__UpperCAmelCase ) )
] # the reference grid
__snake_case : List[str] = 1
__snake_case : str = [
[0 for col in range(len(grid[0] ) )] for row in range(len(__UpperCAmelCase ) )
] # the action grid
__snake_case : Dict = init[0]
__snake_case : List[str] = init[1]
__snake_case : Optional[Any] = 0
    __snake_case : Union[str, Any] = g + heuristic[x][y] # estimated total cost through this cell (g + h)
__snake_case : Any = [[f, g, x, y]]
__snake_case : List[str] = False # flag that is set when search is complete
__snake_case : str = False # flag set if we can't find expand
while not found and not resign:
if len(__UpperCAmelCase ) == 0:
raise ValueError('Algorithm is unable to find solution' )
else: # to choose the least costliest action so as to move closer to the goal
cell.sort()
cell.reverse()
__snake_case : List[Any] = cell.pop()
__snake_case : Optional[int] = next_cell[2]
__snake_case : int = next_cell[3]
__snake_case : Optional[Any] = next_cell[1]
if x == goal[0] and y == goal[1]:
__snake_case : Union[str, Any] = True
else:
for i in range(len(__UpperCAmelCase ) ): # to try out different valid actions
__snake_case : Tuple = x + DIRECTIONS[i][0]
__snake_case : Tuple = y + DIRECTIONS[i][1]
if xa >= 0 and xa < len(__UpperCAmelCase ) and ya >= 0 and ya < len(grid[0] ):
if closed[xa][ya] == 0 and grid[xa][ya] == 0:
__snake_case : List[str] = g + cost
__snake_case : Optional[Any] = ga + heuristic[xa][ya]
cell.append([fa, ga, xa, ya] )
__snake_case : Dict = 1
__snake_case : Any = i
__snake_case : Tuple = []
__snake_case : Dict = goal[0]
__snake_case : Optional[int] = goal[1]
invpath.append([x, y] ) # we get the reverse path from here
while x != init[0] or y != init[1]:
__snake_case : Tuple = x - DIRECTIONS[action[x][y]][0]
__snake_case : Optional[Any] = y - DIRECTIONS[action[x][y]][1]
__snake_case : Tuple = xa
__snake_case : List[str] = ya
invpath.append([x, y] )
__snake_case : Dict = []
for i in range(len(__UpperCAmelCase ) ):
path.append(invpath[len(__UpperCAmelCase ) - 1 - i] )
return path, action
if __name__ == "__main__":
__magic_name__ = [
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 1, 0],
[0, 0, 0, 0, 1, 0],
]
__magic_name__ = [0, 0]
# all coordinates are given in format [y,x]
__magic_name__ = [len(grid) - 1, len(grid[0]) - 1]
__magic_name__ = 1
# the cost map which pushes the path closer to the goal
__magic_name__ = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
for i in range(len(grid)):
for j in range(len(grid[0])):
__magic_name__ = abs(i - goal[0]) + abs(j - goal[1])
if grid[i][j] == 1:
# added extra penalty in the heuristic map
__magic_name__ = 99
__magic_name__ , __magic_name__ = search(grid, init, goal, cost, heuristic)
print('''ACTION MAP''')
for i in range(len(action)):
print(action[i])
for i in range(len(path)):
print(path[i])
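# The heuristic constructed in the __main__ block above, as one helper:
# Manhattan distance to the goal, with obstacle cells pinned to a large
# penalty so the search steers around them.
def manhattan_heuristic(grid, goal, penalty=99):
    return [
        [penalty if cell == 1 else abs(i - goal[0]) + abs(j - goal[1])
         for j, cell in enumerate(row)]
        for i, row in enumerate(grid)
    ]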
| 679 | 1 |
"""simple docstring"""
import io
import json
import fsspec
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def snake_case ( A__ ,A__ ):
assert isinstance(A__ ,A__ )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" ,[False, True] )
def snake_case ( A__ ,A__ ,A__ ):
UpperCAmelCase_ : int = tmp_path / "cache"
UpperCAmelCase_ : Union[str, Any] = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
UpperCAmelCase_ : Tuple = JsonDatasetReader(A__ ,cache_dir=A__ ,keep_in_memory=A__ ).read()
_check_json_dataset(A__ ,A__ )
@pytest.mark.parametrize(
"features" ,[
None,
{"col_1": "string", "col_2": "int64", "col_3": "float64"},
{"col_1": "string", "col_2": "string", "col_3": "string"},
{"col_1": "int32", "col_2": "int32", "col_3": "int32"},
{"col_1": "float32", "col_2": "float32", "col_3": "float32"},
] ,)
def snake_case ( A__ ,A__ ,A__ ):
UpperCAmelCase_ : Union[str, Any] = tmp_path / "cache"
UpperCAmelCase_ : List[str] = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
UpperCAmelCase_ : int = features.copy() if features else default_expected_features
UpperCAmelCase_ : Dict = (
Features({feature: Value(A__ ) for feature, dtype in features.items()} ) if features is not None else None
)
UpperCAmelCase_ : List[str] = JsonDatasetReader(A__ ,features=A__ ,cache_dir=A__ ).read()
_check_json_dataset(A__ ,A__ )
@pytest.mark.parametrize(
"features" ,[
None,
{"col_3": "float64", "col_1": "string", "col_2": "int64"},
] ,)
def snake_case ( A__ ,A__ ,A__ ):
UpperCAmelCase_ : str = tmp_path / "cache"
UpperCAmelCase_ : List[Any] = {"col_3": "float64", "col_1": "string", "col_2": "int64"}
UpperCAmelCase_ : List[Any] = features.copy() if features else default_expected_features
UpperCAmelCase_ : List[str] = (
Features({feature: Value(A__ ) for feature, dtype in features.items()} ) if features is not None else None
)
UpperCAmelCase_ : Optional[int] = JsonDatasetReader(A__ ,features=A__ ,cache_dir=A__ ).read()
assert isinstance(A__ ,A__ )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_3", "col_1", "col_2"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
def snake_case ( A__ ,A__ ):
# jsonl_312_path features are {"col_3": "float64", "col_1": "string", "col_2": "int64"}
UpperCAmelCase_ : Optional[int] = {"col_2": "int64", "col_3": "float64", "col_1": "string"}
UpperCAmelCase_ : Tuple = features.copy()
UpperCAmelCase_ : Any = (
Features({feature: Value(A__ ) for feature, dtype in features.items()} ) if features is not None else None
)
UpperCAmelCase_ : Dict = tmp_path / "cache"
UpperCAmelCase_ : Tuple = JsonDatasetReader(A__ ,features=A__ ,cache_dir=A__ ).read()
assert isinstance(A__ ,A__ )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_2", "col_3", "col_1"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("split" ,[None, NamedSplit("train" ), "train", "test"] )
def snake_case ( A__ ,A__ ,A__ ):
UpperCAmelCase_ : List[str] = tmp_path / "cache"
UpperCAmelCase_ : Union[str, Any] = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
UpperCAmelCase_ : Any = JsonDatasetReader(A__ ,cache_dir=A__ ,split=A__ ).read()
_check_json_dataset(A__ ,A__ )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize("path_type" ,[str, list] )
def snake_case ( A__ ,A__ ,A__ ):
if issubclass(A__ ,A__ ):
UpperCAmelCase_ : Tuple = jsonl_path
elif issubclass(A__ ,A__ ):
UpperCAmelCase_ : int = [jsonl_path]
UpperCAmelCase_ : Dict = tmp_path / "cache"
UpperCAmelCase_ : str = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
UpperCAmelCase_ : List[str] = JsonDatasetReader(A__ ,cache_dir=A__ ).read()
_check_json_dataset(A__ ,A__ )
def snake_case ( A__ ,A__ ,A__=("train",) ):
assert isinstance(A__ ,A__ )
for split in splits:
UpperCAmelCase_ : Union[str, Any] = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" ,[False, True] )
def snake_case ( A__ ,A__ ,A__ ):
UpperCAmelCase_ : List[str] = tmp_path / "cache"
UpperCAmelCase_ : str = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
UpperCAmelCase_ : List[Any] = JsonDatasetReader({"train": jsonl_path} ,cache_dir=A__ ,keep_in_memory=A__ ).read()
_check_json_datasetdict(A__ ,A__ )
@pytest.mark.parametrize(
"features" ,[
None,
{"col_1": "string", "col_2": "int64", "col_3": "float64"},
{"col_1": "string", "col_2": "string", "col_3": "string"},
{"col_1": "int32", "col_2": "int32", "col_3": "int32"},
{"col_1": "float32", "col_2": "float32", "col_3": "float32"},
] ,)
def snake_case ( A__ ,A__ ,A__ ):
UpperCAmelCase_ : Dict = tmp_path / "cache"
UpperCAmelCase_ : Union[str, Any] = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
UpperCAmelCase_ : str = features.copy() if features else default_expected_features
UpperCAmelCase_ : Any = (
Features({feature: Value(A__ ) for feature, dtype in features.items()} ) if features is not None else None
)
UpperCAmelCase_ : Optional[int] = JsonDatasetReader({"train": jsonl_path} ,features=A__ ,cache_dir=A__ ).read()
_check_json_datasetdict(A__ ,A__ )
@pytest.mark.parametrize("split" ,[None, NamedSplit("train" ), "train", "test"] )
def snake_case ( A__ ,A__ ,A__ ):
if split:
UpperCAmelCase_ : Tuple = {split: jsonl_path}
else:
UpperCAmelCase_ : List[Any] = "train"
UpperCAmelCase_ : str = {"train": jsonl_path, "test": jsonl_path}
UpperCAmelCase_ : str = tmp_path / "cache"
UpperCAmelCase_ : int = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
UpperCAmelCase_ : Dict = JsonDatasetReader(A__ ,cache_dir=A__ ).read()
_check_json_datasetdict(A__ ,A__ ,splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def snake_case ( A__ ):
return json.load(A__ )
def snake_case ( A__ ):
    return [json.loads(line) for line in buffer]
class UpperCamelCase_ :
@pytest.mark.parametrize("lines, load_json_function" , [(True, load_json_lines), (False, load_json)] )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Any , lowerCAmelCase_ : Optional[int] ) -> str:
with io.BytesIO() as buffer:
JsonDatasetWriter(lowerCAmelCase_ , lowerCAmelCase_ , lines=lowerCAmelCase_ ).write()
buffer.seek(0 )
UpperCAmelCase_ : Union[str, Any] = load_json_function(lowerCAmelCase_ )
assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ )
assert isinstance(exported_content[0] , lowerCAmelCase_ )
assert len(lowerCAmelCase_ ) == 10
@pytest.mark.parametrize(
"orient, container, keys, len_at" , [
("records", list, {"tokens", "labels", "answers", "id"}, None),
("split", dict, {"columns", "data"}, "data"),
("index", dict, set("0123456789" ), None),
("columns", dict, {"tokens", "labels", "answers", "id"}, "tokens"),
("values", list, None, None),
("table", dict, {"schema", "data"}, "data"),
] , )
def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : List[str] ) -> List[str]:
with io.BytesIO() as buffer:
JsonDatasetWriter(lowerCAmelCase_ , lowerCAmelCase_ , lines=lowerCAmelCase_ , orient=lowerCAmelCase_ ).write()
buffer.seek(0 )
UpperCAmelCase_ : Dict = load_json(lowerCAmelCase_ )
assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(lowerCAmelCase_ , "keys" ) and not hasattr(exported_content[0] , "keys" )
if len_at:
assert len(exported_content[len_at] ) == 10
else:
assert len(lowerCAmelCase_ ) == 10
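    # For reference, the pandas-style orients parametrized above serialize a table
    # such as {"col_1": ["a", "b"], "col_2": [1, 2]} roughly as (values assumed):
    #   records -> [{"col_1": "a", "col_2": 1}, {"col_1": "b", "col_2": 2}]
    #   split   -> {"columns": ["col_1", "col_2"], "data": [["a", 1], ["b", 2]]}
    #   index   -> {"0": {"col_1": "a", ...}, "1": {"col_1": "b", ...}}
    #   columns -> {"col_1": {"0": "a", "1": "b"}, "col_2": {"0": 1, "1": 2}}
    #   values  -> [["a", 1], ["b", 2]]
    #   table   -> {"schema": {...}, "data": [...]}
    # which is exactly what the container/keys/len_at assertions check.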
@pytest.mark.parametrize("lines, load_json_function" , [(True, load_json_lines), (False, load_json)] )
def _SCREAMING_SNAKE_CASE ( self : str , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : int , lowerCAmelCase_ : Tuple ) -> Union[str, Any]:
with io.BytesIO() as buffer:
JsonDatasetWriter(lowerCAmelCase_ , lowerCAmelCase_ , lines=lowerCAmelCase_ , num_proc=2 ).write()
buffer.seek(0 )
UpperCAmelCase_ : str = load_json_function(lowerCAmelCase_ )
assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ )
assert isinstance(exported_content[0] , lowerCAmelCase_ )
assert len(lowerCAmelCase_ ) == 10
@pytest.mark.parametrize(
"orient, container, keys, len_at" , [
("records", list, {"tokens", "labels", "answers", "id"}, None),
("split", dict, {"columns", "data"}, "data"),
("index", dict, set("0123456789" ), None),
("columns", dict, {"tokens", "labels", "answers", "id"}, "tokens"),
("values", list, None, None),
("table", dict, {"schema", "data"}, "data"),
] , )
def _SCREAMING_SNAKE_CASE ( self : Any , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Dict ) -> Optional[Any]:
with io.BytesIO() as buffer:
JsonDatasetWriter(lowerCAmelCase_ , lowerCAmelCase_ , lines=lowerCAmelCase_ , orient=lowerCAmelCase_ , num_proc=2 ).write()
buffer.seek(0 )
UpperCAmelCase_ : Union[str, Any] = load_json(lowerCAmelCase_ )
assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(lowerCAmelCase_ , "keys" ) and not hasattr(exported_content[0] , "keys" )
if len_at:
assert len(exported_content[len_at] ) == 10
else:
assert len(lowerCAmelCase_ ) == 10
def _SCREAMING_SNAKE_CASE ( self : List[str] , lowerCAmelCase_ : Optional[Any] ) -> List[Any]:
with pytest.raises(lowerCAmelCase_ ):
with io.BytesIO() as buffer:
JsonDatasetWriter(lowerCAmelCase_ , lowerCAmelCase_ , num_proc=0 )
@pytest.mark.parametrize("compression, extension" , [("gzip", "gz"), ("bz2", "bz2"), ("xz", "xz")] )
def _SCREAMING_SNAKE_CASE ( self : Any , lowerCAmelCase_ : Dict , lowerCAmelCase_ : int , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Tuple ) -> List[str]:
UpperCAmelCase_ : Optional[Any] = tmp_path_factory.mktemp("data" ) / f"""test.json.{extension}"""
UpperCAmelCase_ : Dict = str(shared_datadir / f"""test_file.json.{extension}""" )
JsonDatasetWriter(lowerCAmelCase_ , lowerCAmelCase_ , compression=lowerCAmelCase_ ).write()
with fsspec.open(lowerCAmelCase_ , "rb" , compression="infer" ) as f:
UpperCAmelCase_ : Optional[Any] = f.read()
with fsspec.open(lowerCAmelCase_ , "rb" , compression="infer" ) as f:
UpperCAmelCase_ : Optional[int] = f.read()
assert exported_content == original_content
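    # A minimal sketch of the fsspec round-trip relied on above (paths are
    # placeholders): compression="infer" selects the codec from the file extension.
    #   with fsspec.open("out.json.gz", "wb", compression="infer") as f:
    #       f.write(b'{"col_1": "a"}')
    #   with fsspec.open("out.json.gz", "rb", compression="infer") as f:
    #       assert f.read() == b'{"col_1": "a"}'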
| 95 |
"""simple docstring"""
from ...utils import logging
from ..ta.modeling_tf_ta import TFTaEncoderModel, TFTaForConditionalGeneration, TFTaModel
from .configuration_mta import MTaConfig
lowerCamelCase_ = logging.get_logger(__name__)
lowerCamelCase_ = '''T5Config'''
class UpperCamelCase_ (__A ):
__magic_name__ = '''mt5'''
__magic_name__ = MTaConfig
class UpperCamelCase_ (__A ):
__magic_name__ = '''mt5'''
__magic_name__ = MTaConfig
class UpperCamelCase_ (__A ):
__magic_name__ = '''mt5'''
__magic_name__ = MTaConfig
| 95 | 1 |
import os
import shutil
import sys
import tempfile
import unittest
from pathlib import Path
import pytest
import transformers
from transformers import (
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoTokenizer,
BertConfig,
BertTokenizer,
BertTokenizerFast,
CTRLTokenizer,
GPTaTokenizer,
GPTaTokenizerFast,
PreTrainedTokenizerFast,
RobertaTokenizer,
RobertaTokenizerFast,
is_tokenizers_available,
)
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.auto.tokenization_auto import (
TOKENIZER_MAPPING,
get_tokenizer_config,
tokenizer_class_from_name,
)
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import (
DUMMY_DIFF_TOKENIZER_IDENTIFIER,
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tokenizers,
slow,
)
sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class lowerCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase__ ( self) -> Union[str, Any]:
__UpperCamelCase :List[str] = 0
@slow
def UpperCamelCase__ ( self) -> Dict:
for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x):
__UpperCamelCase :Optional[Any] = AutoTokenizer.from_pretrained(__lowercase)
self.assertIsNotNone(__lowercase)
self.assertIsInstance(__lowercase , (BertTokenizer, BertTokenizerFast))
self.assertGreater(len(__lowercase) , 0)
for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys():
__UpperCamelCase :List[str] = AutoTokenizer.from_pretrained(__lowercase)
self.assertIsNotNone(__lowercase)
self.assertIsInstance(__lowercase , (GPTaTokenizer, GPTaTokenizerFast))
self.assertGreater(len(__lowercase) , 0)
def UpperCamelCase__ ( self) -> Dict:
__UpperCamelCase :Dict = AutoTokenizer.from_pretrained(__lowercase)
self.assertIsInstance(__lowercase , (BertTokenizer, BertTokenizerFast))
self.assertEqual(tokenizer.vocab_size , 12)
def UpperCamelCase__ ( self) -> Dict:
__UpperCamelCase :Tuple = AutoTokenizer.from_pretrained(__lowercase)
self.assertIsInstance(__lowercase , (RobertaTokenizer, RobertaTokenizerFast))
self.assertEqual(tokenizer.vocab_size , 20)
def UpperCamelCase__ ( self) -> Optional[int]:
__UpperCamelCase :List[str] = AutoConfig.from_pretrained(__lowercase)
self.assertIsInstance(__lowercase , __lowercase)
# Check that tokenizer_type ≠ model_type
__UpperCamelCase :List[str] = AutoTokenizer.from_pretrained(__lowercase , config=__lowercase)
self.assertIsInstance(__lowercase , (BertTokenizer, BertTokenizerFast))
self.assertEqual(tokenizer.vocab_size , 12)
def UpperCamelCase__ ( self) -> Optional[Any]:
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('''./tests/fixtures/vocab.txt''' , os.path.join(__lowercase , '''vocab.txt'''))
__UpperCamelCase :Any = AutoTokenizer.from_pretrained(__lowercase , tokenizer_type='''bert''' , use_fast=__lowercase)
self.assertIsInstance(__lowercase , __lowercase)
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('''./tests/fixtures/vocab.json''' , os.path.join(__lowercase , '''vocab.json'''))
shutil.copy('''./tests/fixtures/merges.txt''' , os.path.join(__lowercase , '''merges.txt'''))
__UpperCamelCase :Dict = AutoTokenizer.from_pretrained(__lowercase , tokenizer_type='''gpt2''' , use_fast=__lowercase)
self.assertIsInstance(__lowercase , __lowercase)
@require_tokenizers
def UpperCamelCase__ ( self) -> str:
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('''./tests/fixtures/vocab.txt''' , os.path.join(__lowercase , '''vocab.txt'''))
__UpperCamelCase :List[str] = AutoTokenizer.from_pretrained(__lowercase , tokenizer_type='''bert''')
self.assertIsInstance(__lowercase , __lowercase)
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('''./tests/fixtures/vocab.json''' , os.path.join(__lowercase , '''vocab.json'''))
shutil.copy('''./tests/fixtures/merges.txt''' , os.path.join(__lowercase , '''merges.txt'''))
__UpperCamelCase :List[str] = AutoTokenizer.from_pretrained(__lowercase , tokenizer_type='''gpt2''')
self.assertIsInstance(__lowercase , __lowercase)
def UpperCamelCase__ ( self) -> Dict:
with pytest.raises(__lowercase):
AutoTokenizer.from_pretrained('''./''' , tokenizer_type='''xxx''')
@require_tokenizers
def UpperCamelCase__ ( self) -> Optional[int]:
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
__UpperCamelCase :Optional[Any] = tokenizer_class.from_pretrained('''wietsedv/bert-base-dutch-cased''')
self.assertIsInstance(__lowercase , (BertTokenizer, BertTokenizerFast))
if isinstance(__lowercase , __lowercase):
self.assertEqual(tokenizer.basic_tokenizer.do_lower_case , __lowercase)
else:
self.assertEqual(tokenizer.do_lower_case , __lowercase)
self.assertEqual(tokenizer.model_max_length , 512)
@require_tokenizers
def UpperCamelCase__ ( self) -> List[Any]:
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
with self.assertRaisesRegex(
__lowercase , '''julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier''' , ):
__UpperCamelCase :Union[str, Any] = tokenizer_class.from_pretrained('''julien-c/herlolip-not-exists''')
def UpperCamelCase__ ( self) -> Optional[int]:
# tests: https://github.com/huggingface/transformers/pull/13251
# 1. models with `-`, e.g. xlm-roberta -> xlm_roberta
# 2. models that don't remap 1-1 from model-name to model file, e.g., openai-gpt -> openai
__UpperCamelCase :List[str] = TOKENIZER_MAPPING.values()
__UpperCamelCase :int = []
for slow_tok, fast_tok in tokenizers:
if slow_tok is not None:
tokenizer_names.append(slow_tok.__name__)
if fast_tok is not None:
tokenizer_names.append(fast_tok.__name__)
for tokenizer_name in tokenizer_names:
# must find the right class
tokenizer_class_from_name(__lowercase)
@require_tokenizers
def UpperCamelCase__ ( self) -> Optional[int]:
self.assertIsInstance(AutoTokenizer.from_pretrained('''bert-base-cased''' , use_fast=__lowercase) , __lowercase)
self.assertIsInstance(AutoTokenizer.from_pretrained('''bert-base-cased''') , __lowercase)
@require_tokenizers
def UpperCamelCase__ ( self) -> Optional[int]:
__UpperCamelCase :str = AutoTokenizer.from_pretrained('''distilbert-base-uncased''' , do_lower_case=__lowercase)
__UpperCamelCase :Dict = '''Hello, world. How are you?'''
__UpperCamelCase :Any = tokenizer.tokenize(__lowercase)
self.assertEqual('''[UNK]''' , tokens[0])
__UpperCamelCase :Union[str, Any] = AutoTokenizer.from_pretrained('''microsoft/mpnet-base''' , do_lower_case=__lowercase)
__UpperCamelCase :Tuple = tokenizer.tokenize(__lowercase)
self.assertEqual('''[UNK]''' , tokens[0])
@require_tokenizers
def UpperCamelCase__ ( self) -> Optional[int]:
__UpperCamelCase :Tuple = AutoTokenizer.from_pretrained('''robot-test/dummy-tokenizer-fast-with-model-config''')
self.assertEqual(type(__lowercase) , __lowercase)
self.assertEqual(tokenizer.model_max_length , 512)
self.assertEqual(tokenizer.vocab_size , 30_000)
self.assertEqual(tokenizer.unk_token , '''[UNK]''')
self.assertEqual(tokenizer.padding_side , '''right''')
self.assertEqual(tokenizer.truncation_side , '''right''')
def UpperCamelCase__ ( self) -> Union[str, Any]:
__UpperCamelCase :Dict = AutoTokenizer.from_pretrained(__lowercase)
self.assertIsInstance(__lowercase , (BertTokenizer, BertTokenizerFast))
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(__lowercase)
__UpperCamelCase :Dict = AutoTokenizer.from_pretrained(__lowercase)
self.assertIsInstance(__lowercase , tokenizer.__class__)
self.assertEqual(tokenizera.vocab_size , 12)
def UpperCamelCase__ ( self) -> Union[str, Any]:
__UpperCamelCase :Tuple = AutoTokenizer.from_pretrained('''ctrl''')
# There is no fast CTRL so this always gives us a slow tokenizer.
self.assertIsInstance(__lowercase , __lowercase)
def UpperCamelCase__ ( self) -> Union[str, Any]:
# Check we can load the tokenizer config of an online model.
__UpperCamelCase :List[str] = get_tokenizer_config('''bert-base-cased''')
__UpperCamelCase :str = config.pop('''_commit_hash''' , __lowercase)
# If we ever update bert-base-cased tokenizer config, this dict here will need to be updated.
self.assertEqual(__lowercase , {'''do_lower_case''': False})
# This model does not have a tokenizer_config so we get back an empty dict.
__UpperCamelCase :int = get_tokenizer_config(__lowercase)
self.assertDictEqual(__lowercase , {})
# A tokenizer saved with `save_pretrained` always creates a tokenizer config.
__UpperCamelCase :str = AutoTokenizer.from_pretrained(__lowercase)
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(__lowercase)
__UpperCamelCase :Optional[Any] = get_tokenizer_config(__lowercase)
# Check the class of the tokenizer was properly saved (note that it always saves the slow class).
self.assertEqual(config['''tokenizer_class'''] , '''BertTokenizer''')
def UpperCamelCase__ ( self) -> List[str]:
try:
AutoConfig.register('''custom''' , __lowercase)
AutoTokenizer.register(__lowercase , slow_tokenizer_class=__lowercase)
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(__lowercase):
AutoTokenizer.register(__lowercase , slow_tokenizer_class=__lowercase)
__UpperCamelCase :List[Any] = CustomTokenizer.from_pretrained(__lowercase)
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(__lowercase)
__UpperCamelCase :Union[str, Any] = AutoTokenizer.from_pretrained(__lowercase)
self.assertIsInstance(__lowercase , __lowercase)
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
@require_tokenizers
def UpperCamelCase__ ( self) -> Optional[int]:
try:
AutoConfig.register('''custom''' , __lowercase)
# Can register in two steps
AutoTokenizer.register(__lowercase , slow_tokenizer_class=__lowercase)
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, None))
AutoTokenizer.register(__lowercase , fast_tokenizer_class=__lowercase)
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast))
del TOKENIZER_MAPPING._extra_content[CustomConfig]
# Can register in one step
AutoTokenizer.register(
__lowercase , slow_tokenizer_class=__lowercase , fast_tokenizer_class=__lowercase)
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast))
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(__lowercase):
AutoTokenizer.register(__lowercase , fast_tokenizer_class=__lowercase)
            # We pass through a bert fast tokenizer because there is no slow-to-fast converter for our new tokenizer
# and that model does not have a tokenizer.json
with tempfile.TemporaryDirectory() as tmp_dir:
__UpperCamelCase :str = BertTokenizerFast.from_pretrained(__lowercase)
bert_tokenizer.save_pretrained(__lowercase)
__UpperCamelCase :List[str] = CustomTokenizerFast.from_pretrained(__lowercase)
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(__lowercase)
__UpperCamelCase :int = AutoTokenizer.from_pretrained(__lowercase)
self.assertIsInstance(__lowercase , __lowercase)
__UpperCamelCase :List[str] = AutoTokenizer.from_pretrained(__lowercase , use_fast=__lowercase)
self.assertIsInstance(__lowercase , __lowercase)
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def UpperCamelCase__ ( self) -> List[str]:
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(__lowercase):
__UpperCamelCase :int = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''')
# If remote code is disabled, we can't load this config.
with self.assertRaises(__lowercase):
__UpperCamelCase :Optional[Any] = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=__lowercase)
__UpperCamelCase :Tuple = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=__lowercase)
self.assertTrue(tokenizer.special_attribute_present)
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(__lowercase)
__UpperCamelCase :Tuple = AutoTokenizer.from_pretrained(__lowercase , trust_remote_code=__lowercase)
self.assertTrue(reloaded_tokenizer.special_attribute_present)
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''')
self.assertEqual(reloaded_tokenizer.__class__.__name__ , '''NewTokenizerFast''')
# Test we can also load the slow version
__UpperCamelCase :Dict = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=__lowercase , use_fast=__lowercase)
self.assertTrue(tokenizer.special_attribute_present)
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''')
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(__lowercase)
__UpperCamelCase :Tuple = AutoTokenizer.from_pretrained(__lowercase , trust_remote_code=__lowercase , use_fast=__lowercase)
self.assertEqual(reloaded_tokenizer.__class__.__name__ , '''NewTokenizer''')
self.assertTrue(reloaded_tokenizer.special_attribute_present)
else:
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''')
self.assertEqual(reloaded_tokenizer.__class__.__name__ , '''NewTokenizer''')
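    # For reference (a sketch, not part of the test): trust_remote_code=True allows
    # from_pretrained to download and run the tokenizer class defined inside the
    # repository itself, e.g.
    #   AutoTokenizer.from_pretrained("some-org/some-repo", trust_remote_code=True)
    # which is why these tests assert on the repo-defined special_attribute_present flag.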
@require_tokenizers
def UpperCamelCase__ ( self) -> Any:
class lowerCamelCase_ ( UpperCAmelCase_ ):
'''simple docstring'''
a__ : Union[str, Any] = False
class lowerCamelCase_ ( UpperCAmelCase_ ):
'''simple docstring'''
a__ : str = NewTokenizer
a__ : str = False
try:
AutoConfig.register('''custom''' , __lowercase)
AutoTokenizer.register(__lowercase , slow_tokenizer_class=__lowercase)
AutoTokenizer.register(__lowercase , fast_tokenizer_class=__lowercase)
# If remote code is not set, the default is to use local
__UpperCamelCase :Optional[Any] = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''')
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''')
self.assertFalse(tokenizer.special_attribute_present)
__UpperCamelCase :Dict = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' , use_fast=__lowercase)
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''')
self.assertFalse(tokenizer.special_attribute_present)
# If remote code is disabled, we load the local one.
__UpperCamelCase :Optional[int] = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=__lowercase)
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''')
self.assertFalse(tokenizer.special_attribute_present)
__UpperCamelCase :Tuple = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=__lowercase , use_fast=__lowercase)
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''')
self.assertFalse(tokenizer.special_attribute_present)
# If remote is enabled, we load from the Hub
__UpperCamelCase :List[Any] = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=__lowercase)
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''')
self.assertTrue(tokenizer.special_attribute_present)
__UpperCamelCase :Any = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=__lowercase , use_fast=__lowercase)
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''')
self.assertTrue(tokenizer.special_attribute_present)
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def UpperCamelCase__ ( self) -> Dict:
__UpperCamelCase :List[Any] = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer_legacy''' , trust_remote_code=__lowercase)
self.assertTrue(tokenizer.special_attribute_present)
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''')
# Test we can also load the slow version
__UpperCamelCase :List[str] = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer_legacy''' , trust_remote_code=__lowercase , use_fast=__lowercase)
self.assertTrue(tokenizer.special_attribute_present)
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''')
else:
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''')
def UpperCamelCase__ ( self) -> Optional[int]:
with self.assertRaisesRegex(
__lowercase , '''bert-base is not a local folder and is not a valid model identifier'''):
__UpperCamelCase :str = AutoTokenizer.from_pretrained('''bert-base''')
def UpperCamelCase__ ( self) -> List[str]:
with self.assertRaisesRegex(
__lowercase , r'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)'''):
__UpperCamelCase :Optional[Any] = AutoTokenizer.from_pretrained(__lowercase , revision='''aaaaaa''')
def UpperCamelCase__ ( self) -> List[str]:
# Make sure we have cached the tokenizer.
__UpperCamelCase :Any = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''')
with RequestCounter() as counter:
__UpperCamelCase :Any = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''')
self.assertEqual(counter.get_request_count , 0)
self.assertEqual(counter.head_request_count , 1)
self.assertEqual(counter.other_request_count , 0)
| 452 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowercase = logging.get_logger(__name__)
__lowercase = {
'''microsoft/unispeech-large-1500h-cv''': (
'''https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json'''
),
# See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}
class lowerCamelCase_ ( UpperCAmelCase_ ):
'''simple docstring'''
a__ : Dict = """unispeech"""
def __init__( self , __lowercase=32 , __lowercase=768 , __lowercase=12 , __lowercase=12 , __lowercase=3_072 , __lowercase="gelu" , __lowercase=0.1 , __lowercase=0.1 , __lowercase=0.1 , __lowercase=0.0 , __lowercase=0.0 , __lowercase=0.1 , __lowercase=0.1 , __lowercase=0.02 , __lowercase=1E-5 , __lowercase="group" , __lowercase="gelu" , __lowercase=(512, 512, 512, 512, 512, 512, 512) , __lowercase=(5, 2, 2, 2, 2, 2, 2) , __lowercase=(10, 3, 3, 3, 3, 2, 2) , __lowercase=False , __lowercase=128 , __lowercase=16 , __lowercase=False , __lowercase=True , __lowercase=0.05 , __lowercase=10 , __lowercase=2 , __lowercase=0.0 , __lowercase=10 , __lowercase=0 , __lowercase=320 , __lowercase=2 , __lowercase=0.1 , __lowercase=100 , __lowercase=256 , __lowercase=256 , __lowercase=0.1 , __lowercase="mean" , __lowercase=False , __lowercase=False , __lowercase=256 , __lowercase=80 , __lowercase=0 , __lowercase=1 , __lowercase=2 , __lowercase=0.5 , **__lowercase , ) -> Optional[int]:
super().__init__(**__lowercase , pad_token_id=__lowercase , bos_token_id=__lowercase , eos_token_id=__lowercase)
__UpperCamelCase :str = hidden_size
__UpperCamelCase :List[str] = feat_extract_norm
__UpperCamelCase :str = feat_extract_activation
__UpperCamelCase :str = list(__lowercase)
__UpperCamelCase :List[Any] = list(__lowercase)
__UpperCamelCase :List[Any] = list(__lowercase)
__UpperCamelCase :Any = conv_bias
__UpperCamelCase :List[Any] = num_conv_pos_embeddings
__UpperCamelCase :Tuple = num_conv_pos_embedding_groups
__UpperCamelCase :Optional[int] = len(self.conv_dim)
__UpperCamelCase :Optional[int] = num_hidden_layers
__UpperCamelCase :Union[str, Any] = intermediate_size
__UpperCamelCase :Tuple = hidden_act
__UpperCamelCase :Optional[int] = num_attention_heads
__UpperCamelCase :Any = hidden_dropout
__UpperCamelCase :List[str] = attention_dropout
__UpperCamelCase :int = activation_dropout
__UpperCamelCase :int = feat_proj_dropout
__UpperCamelCase :Any = final_dropout
__UpperCamelCase :Optional[Any] = layerdrop
__UpperCamelCase :Any = layer_norm_eps
__UpperCamelCase :List[str] = initializer_range
__UpperCamelCase :Tuple = num_ctc_classes
__UpperCamelCase :Union[str, Any] = vocab_size
__UpperCamelCase :List[Any] = do_stable_layer_norm
__UpperCamelCase :Dict = use_weighted_layer_sum
__UpperCamelCase :str = classifier_proj_size
if (
(len(self.conv_stride) != self.num_feat_extract_layers)
or (len(self.conv_kernel) != self.num_feat_extract_layers)
or (len(self.conv_dim) != self.num_feat_extract_layers)
):
raise ValueError(
'''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='''
''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='''
f""" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"""
f""" `len(config.conv_kernel) = {len(self.conv_kernel)}`.""")
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
__UpperCamelCase :List[Any] = apply_spec_augment
__UpperCamelCase :Optional[int] = mask_time_prob
__UpperCamelCase :int = mask_time_length
__UpperCamelCase :Any = mask_time_min_masks
__UpperCamelCase :Any = mask_feature_prob
__UpperCamelCase :str = mask_feature_length
__UpperCamelCase :Tuple = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
__UpperCamelCase :Optional[Any] = num_codevectors_per_group
__UpperCamelCase :Dict = num_codevector_groups
__UpperCamelCase :Optional[int] = contrastive_logits_temperature
__UpperCamelCase :Union[str, Any] = feat_quantizer_dropout
__UpperCamelCase :List[str] = num_negatives
__UpperCamelCase :Union[str, Any] = codevector_dim
__UpperCamelCase :int = proj_codevector_dim
__UpperCamelCase :Tuple = diversity_loss_weight
# ctc loss
__UpperCamelCase :List[Any] = ctc_loss_reduction
__UpperCamelCase :int = ctc_zero_infinity
# pretraining loss
__UpperCamelCase :Optional[Any] = replace_prob
@property
def UpperCamelCase__ ( self) -> Dict:
return functools.reduce(operator.mul , self.conv_stride , 1)
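# A quick check of the property above (default values assumed): with conv_stride
# (5, 2, 2, 2, 2, 2, 2) the running product is 5 * 2**6 = 320, i.e. the feature
# encoder downsamples the input waveform by a factor of 320.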
| 452 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_lowerCAmelCase : List[str] = {
'''configuration_distilbert''': [
'''DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''DistilBertConfig''',
'''DistilBertOnnxConfig''',
],
'''tokenization_distilbert''': ['''DistilBertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase : int = ['''DistilBertTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase : Optional[Any] = [
'''DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''DistilBertForMaskedLM''',
'''DistilBertForMultipleChoice''',
'''DistilBertForQuestionAnswering''',
'''DistilBertForSequenceClassification''',
'''DistilBertForTokenClassification''',
'''DistilBertModel''',
'''DistilBertPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase : Dict = [
'''TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFDistilBertForMaskedLM''',
'''TFDistilBertForMultipleChoice''',
'''TFDistilBertForQuestionAnswering''',
'''TFDistilBertForSequenceClassification''',
'''TFDistilBertForTokenClassification''',
'''TFDistilBertMainLayer''',
'''TFDistilBertModel''',
'''TFDistilBertPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase : Optional[int] = [
'''FlaxDistilBertForMaskedLM''',
'''FlaxDistilBertForMultipleChoice''',
'''FlaxDistilBertForQuestionAnswering''',
'''FlaxDistilBertForSequenceClassification''',
'''FlaxDistilBertForTokenClassification''',
'''FlaxDistilBertModel''',
'''FlaxDistilBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_distilbert import (
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DistilBertConfig,
DistilBertOnnxConfig,
)
from .tokenization_distilbert import DistilBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_distilbert_fast import DistilBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_distilbert import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
DistilBertPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertMainLayer,
TFDistilBertModel,
TFDistilBertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
FlaxDistilBertPreTrainedModel,
)
else:
import sys
_lowerCAmelCase : str = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
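# For reference (a sketch of the intent, not an extra API): the _LazyModule above
# makes importing this package cheap -- the torch/tf/flax-backed submodules listed
# in _import_structure are only imported the first time one of their names
# (e.g. DistilBertModel) is actually accessed.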
| 454 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_UpperCAmelCase = logging.get_logger(__name__)
_UpperCAmelCase = {
"microsoft/biogpt": "https://huggingface.co/microsoft/biogpt/resolve/main/config.json",
# See all BioGPT models at https://huggingface.co/models?filter=biogpt
}
class _UpperCAmelCase ( __lowercase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = '''biogpt'''
def __init__( self : Optional[Any] , UpperCamelCase__ : str=42384 , UpperCamelCase__ : Tuple=1024 , UpperCamelCase__ : Dict=24 , UpperCamelCase__ : Any=16 , UpperCamelCase__ : str=4096 , UpperCamelCase__ : Tuple="gelu" , UpperCamelCase__ : List[str]=0.1 , UpperCamelCase__ : Any=0.1 , UpperCamelCase__ : Tuple=1024 , UpperCamelCase__ : List[Any]=0.02 , UpperCamelCase__ : Dict=1e-1_2 , UpperCamelCase__ : Any=True , UpperCamelCase__ : List[str]=True , UpperCamelCase__ : Optional[Any]=0.0 , UpperCamelCase__ : Optional[Any]=0.0 , UpperCamelCase__ : Any=1 , UpperCamelCase__ : List[str]=0 , UpperCamelCase__ : Optional[Any]=2 , **UpperCamelCase__ : List[Any] , ):
A = vocab_size
A = max_position_embeddings
A = hidden_size
A = num_hidden_layers
A = num_attention_heads
A = intermediate_size
A = hidden_act
A = hidden_dropout_prob
A = attention_probs_dropout_prob
A = initializer_range
A = layer_norm_eps
A = scale_embedding
A = use_cache
A = layerdrop
A = activation_dropout
super().__init__(pad_token_id=UpperCamelCase__ , bos_token_id=UpperCamelCase__ , eos_token_id=UpperCamelCase__ , **UpperCamelCase__ )
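# A minimal usage sketch (attribute names per the assignments above; the defaults
# are assumed to follow the upstream BioGPT release, e.g. 24 hidden layers and 16
# attention heads):
#   config = _UpperCAmelCase()
#   config.num_hidden_layers, config.num_attention_heads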
| 699 | 0 |
"""simple docstring"""
import sys
from typing import Tuple
import numpy as np
import torch
from PIL import Image
from torch import nn
from transformers.image_utils import PILImageResampling
from utils import img_tensorize
class UpperCamelCase :
def __init__(self : str , _A : int , _A : Union[str, Any]=sys.maxsize) -> Tuple:
__snake_case : Optional[int] = '''bilinear'''
__snake_case : Optional[Any] = max_size
__snake_case : List[Any] = short_edge_length
def __call__(self : int , _A : Dict) -> Dict:
__snake_case : Any = []
for img in imgs:
            __snake_case , __snake_case : Optional[Any] = img.shape[:2]
# later: provide list and randomly choose index for resize
__snake_case : str = np.random.randint(self.short_edge_length[0] , self.short_edge_length[1] + 1)
if size == 0:
return img
__snake_case : int = size * 1.0 / min(_A , _A)
if h < w:
                __snake_case , __snake_case : Dict = size, scale * w
else:
                __snake_case , __snake_case : str = scale * h, size
if max(_A , _A) > self.max_size:
__snake_case : Any = self.max_size * 1.0 / max(_A , _A)
__snake_case : Tuple = newh * scale
__snake_case : Any = neww * scale
__snake_case : List[str] = int(neww + 0.5)
__snake_case : List[str] = int(newh + 0.5)
if img.dtype == np.uinta:
__snake_case : Optional[int] = Image.fromarray(_A)
__snake_case : List[str] = pil_image.resize((neww, newh) , PILImageResampling.BILINEAR)
__snake_case : Optional[Any] = np.asarray(_A)
else:
                __snake_case : Any = img.permute(2 , 0 , 1).unsqueeze(0) # hwc -> nchw
__snake_case : Union[str, Any] = nn.functional.interpolate(
_A , (newh, neww) , mode=self.interp_method , align_corners=_A).squeeze(0)
img_augs.append(_A)
return img_augs
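# Worked example of the scaling above (input sizes assumed): for an image with
# (h, w) = (480, 640) and a sampled short edge of 800, scale = 800 / 480 ≈ 1.667,
# so (newh, neww) = (800, 1066.7) -> (800, 1067) after the +0.5 rounding; since
# max(800, 1067) is below the default max_size, no second rescale is applied.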
class UpperCamelCase :
def __init__(self : Tuple , _A : Dict) -> Any:
__snake_case : Optional[Any] = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST] , cfg.INPUT.MAX_SIZE_TEST)
__snake_case : Any = cfg.INPUT.FORMAT
__snake_case : List[Any] = cfg.SIZE_DIVISIBILITY
__snake_case : List[str] = cfg.PAD_VALUE
__snake_case : Dict = cfg.INPUT.MAX_SIZE_TEST
__snake_case : Optional[Any] = cfg.MODEL.DEVICE
__snake_case : int = torch.tensor(cfg.MODEL.PIXEL_STD).to(self.device).view(len(cfg.MODEL.PIXEL_STD) , 1 , 1)
__snake_case : Tuple = torch.tensor(cfg.MODEL.PIXEL_MEAN).to(self.device).view(len(cfg.MODEL.PIXEL_STD) , 1 , 1)
__snake_case : int = lambda _A: (x - self.pixel_mean) / self.pixel_std
def _lowercase (self : List[Any] , _A : List[Any]) -> List[Any]:
__snake_case : Optional[Any] = tuple(max(_A) for s in zip(*[img.shape for img in images]))
__snake_case : Union[str, Any] = [im.shape[-2:] for im in images]
__snake_case : Tuple = [
nn.functional.pad(
_A , [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]] , value=self.pad_value , )
for size, im in zip(_A , _A)
]
return torch.stack(_A), torch.tensor(_A)
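    # Worked example of the padding above (shapes assumed): for images of shape
    # (3, 800, 1067) and (3, 704, 1067), max_size is (3, 800, 1067) and the second
    # image is padded with [0, 0, 0, 96] (nothing on the width axis, 96 rows at the
    # bottom, filled with cfg.PAD_VALUE) before both are stacked into a single
    # (2, 3, 800, 1067) batch tensor.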
def __call__(self : List[Any] , _A : str , _A : Union[str, Any]=False) -> List[Any]:
with torch.no_grad():
if not isinstance(_A , _A):
__snake_case : Any = [images]
if single_image:
assert len(_A) == 1
for i in range(len(_A)):
if isinstance(images[i] , torch.Tensor):
images.insert(_A , images.pop(_A).to(self.device).float())
elif not isinstance(images[i] , torch.Tensor):
images.insert(
_A , torch.as_tensor(img_tensorize(images.pop(_A) , input_format=self.input_format))
.to(self.device)
.float() , )
# resize smallest edge
__snake_case : Union[str, Any] = torch.tensor([im.shape[:2] for im in images])
__snake_case : Dict = self.aug(_A)
# transpose images and convert to torch tensors
# images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images]
# now normalize before pad to avoid useless arithmetic
__snake_case : List[str] = [self.normalizer(_A) for x in images]
# now pad them to do the following operations
            __snake_case , __snake_case : Dict = self.pad(_A)
# Normalize
if self.size_divisibility > 0:
raise NotImplementedError()
# pad
__snake_case : Union[str, Any] = torch.true_divide(_A , _A)
if single_image:
return images[0], sizes[0], scales_yx[0]
else:
return images, sizes, scales_yx
def __UpperCAmelCase ( UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
boxes[:, 0::2] *= scale_yx[:, 1]
boxes[:, 1::2] *= scale_yx[:, 0]
return boxes
def __UpperCAmelCase ( UpperCAmelCase_ : Dict , UpperCAmelCase_ : Tuple[int, int] ) -> Union[str, Any]:
'''simple docstring'''
assert torch.isfinite(__A ).all(), "Box tensor contains infinite or NaN!"
    __snake_case , __snake_case : Optional[Any] = box_size
tensor[:, 0].clamp_(min=0 , max=__A )
tensor[:, 1].clamp_(min=0 , max=__A )
tensor[:, 2].clamp_(min=0 , max=__A )
tensor[:, 3].clamp_(min=0 , max=__A )
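# Worked example for the two box helpers above (sizes assumed): with
# scales_yx ≈ (0.6, 0.6) (a raw 480x640 image resized to 800x1067), a box
# [100, 50, 200, 150] in resized coordinates maps back to [60, 30, 120, 90];
# the clamp helper then clips every coordinate into the given box_size bounds.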
| 708 |
"""simple docstring"""
import sys
from typing import Tuple
import numpy as np
import torch
from PIL import Image
from torch import nn
from transformers.image_utils import PILImageResampling
from utils import img_tensorize
class UpperCamelCase :
def __init__(self : Tuple , _A : List[Any] , _A : int=sys.maxsize) -> int:
__snake_case : Dict = 'bilinear'
__snake_case : int = max_size
__snake_case : Any = short_edge_length
def __call__(self : Optional[Any] , _A : Any) -> Dict:
__snake_case : Any = []
for img in imgs:
__snake_case , __snake_case : int = img.shape[:2]
# later: provide list and randomly choose index for resize
__snake_case : Optional[Any] = np.random.randint(self.short_edge_length[0] , self.short_edge_length[1] + 1)
if size == 0:
return img
__snake_case : int = size * 1.0 / min(_A , _A)
if h < w:
__snake_case , __snake_case : str = size, scale * w
else:
__snake_case , __snake_case : List[str] = scale * h, size
if max(_A , _A) > self.max_size:
__snake_case : str = self.max_size * 1.0 / max(_A , _A)
__snake_case : Any = newh * scale
__snake_case : Any = neww * scale
__snake_case : Optional[int] = int(neww + 0.5)
__snake_case : str = int(newh + 0.5)
if img.dtype == np.uinta:
__snake_case : str = Image.fromarray(_A)
__snake_case : str = pil_image.resize((neww, newh) , PILImageResampling.BILINEAR)
__snake_case : Tuple = np.asarray(_A)
else:
                __snake_case : str = img.permute(2 , 0 , 1).unsqueeze(0) # hwc -> nchw
__snake_case : Dict = nn.functional.interpolate(
_A , (newh, neww) , mode=self.interp_method , align_corners=_A).squeeze(0)
img_augs.append(_A)
return img_augs
class UpperCamelCase :
def __init__(self : List[str] , _A : List[str]) -> Optional[int]:
__snake_case : Union[str, Any] = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST] , cfg.INPUT.MAX_SIZE_TEST)
__snake_case : Union[str, Any] = cfg.INPUT.FORMAT
__snake_case : List[Any] = cfg.SIZE_DIVISIBILITY
__snake_case : Tuple = cfg.PAD_VALUE
__snake_case : int = cfg.INPUT.MAX_SIZE_TEST
__snake_case : List[Any] = cfg.MODEL.DEVICE
__snake_case : Tuple = torch.tensor(cfg.MODEL.PIXEL_STD).to(self.device).view(len(cfg.MODEL.PIXEL_STD) , 1 , 1)
__snake_case : Any = torch.tensor(cfg.MODEL.PIXEL_MEAN).to(self.device).view(len(cfg.MODEL.PIXEL_STD) , 1 , 1)
__snake_case : Union[str, Any] = lambda _A: (x - self.pixel_mean) / self.pixel_std
def _lowercase (self : Tuple , _A : str) -> int:
__snake_case : str = tuple(max(_A) for s in zip(*[img.shape for img in images]))
__snake_case : int = [im.shape[-2:] for im in images]
__snake_case : int = [
nn.functional.pad(
_A , [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]] , value=self.pad_value , )
for size, im in zip(_A , _A)
]
return torch.stack(_A), torch.tensor(_A)
def __call__(self : Optional[Any] , _A : Dict , _A : Union[str, Any]=False) -> Optional[int]:
with torch.no_grad():
if not isinstance(_A , _A):
__snake_case : str = [images]
if single_image:
assert len(_A) == 1
for i in range(len(_A)):
if isinstance(images[i] , torch.Tensor):
images.insert(_A , images.pop(_A).to(self.device).float())
elif not isinstance(images[i] , torch.Tensor):
images.insert(
_A , torch.as_tensor(img_tensorize(images.pop(_A) , input_format=self.input_format))
.to(self.device)
.float() , )
# resize smallest edge
__snake_case : Any = torch.tensor([im.shape[:2] for im in images])
__snake_case : Tuple = self.aug(_A)
# transpose images and convert to torch tensors
# images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images]
# now normalize before pad to avoid useless arithmetic
__snake_case : List[str] = [self.normalizer(_A) for x in images]
# now pad them to do the following operations
__snake_case , __snake_case : List[Any] = self.pad(_A)
# Normalize
if self.size_divisibility > 0:
raise NotImplementedError()
# pad
__snake_case : int = torch.true_divide(_A , _A)
if single_image:
return images[0], sizes[0], scales_yx[0]
else:
return images, sizes, scales_yx
def __UpperCAmelCase ( UpperCAmelCase_ : Any , UpperCAmelCase_ : Any ) -> int:
'''simple docstring'''
boxes[:, 0::2] *= scale_yx[:, 1]
boxes[:, 1::2] *= scale_yx[:, 0]
return boxes
def __UpperCAmelCase ( UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Tuple[int, int] ) -> Union[str, Any]:
'''simple docstring'''
assert torch.isfinite(UpperCAmelCase_ ).all(), "Box tensor contains infinite or NaN!"
__snake_case , __snake_case : Optional[int] = box_size
tensor[:, 0].clamp_(min=0 , max=UpperCAmelCase_ )
tensor[:, 1].clamp_(min=0 , max=UpperCAmelCase_ )
tensor[:, 2].clamp_(min=0 , max=UpperCAmelCase_ )
tensor[:, 3].clamp_(min=0 , max=UpperCAmelCase_ )
| 192 | 0 |
'''simple docstring'''
from collections.abc import Callable
import numpy as np
def _snake_case ( A , A , A , A , A ) -> np.array:
lowerCAmelCase__ = int(np.ceil((x_end - xa) / step_size ) )
lowerCAmelCase__ = np.zeros((n + 1,) )
lowerCAmelCase__ = ya
lowerCAmelCase__ = xa
for k in range(A ):
lowerCAmelCase__ = y[k] + step_size * ode_func(A , y[k] )
lowerCAmelCase__ = y[k] + (
(step_size / 2) * (ode_func(A , y[k] ) + ode_func(x + step_size , A ))
)
x += step_size
return y
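# Worked step of the predictor-corrector above (inputs assumed): for
# ode_func(x, y) = y with x0 = 0, y0 = 1 and step_size = 0.1, the Euler predictor
# is y* = 1 + 0.1 * 1 = 1.1 and the trapezoidal corrector gives
# y_1 = 1 + (0.1 / 2) * (1 + 1.1) = 1.105, close to the exact exp(0.1) ≈ 1.10517.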
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 90 |
'''simple docstring'''
def _SCREAMING_SNAKE_CASE ( UpperCamelCase__ : list[int] , UpperCamelCase__ : str ):
"""simple docstring"""
a_ : Tuple = int(UpperCamelCase__ )
# Initialize Result
a_ : Tuple = []
# Traverse through all denomination
for denomination in reversed(UpperCamelCase__ ):
# Find denominations
while int(UpperCamelCase__ ) >= int(UpperCamelCase__ ):
total_value -= int(UpperCamelCase__ )
answer.append(UpperCamelCase__ ) # Append the "answers" array
return answer
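# Worked example for the greedy function above (called find_minimum_change by the
# driver below): find_minimum_change([1, 2, 5, 10, 20, 50, 100, 500, 2000], "93")
# walks the denominations from largest to smallest and greedily subtracts:
#   93 -> 50 (43 left) -> 20 (23 left) -> 20 (3 left) -> 2 (1 left) -> 1 (0 left)
# returning [50, 20, 20, 2, 1]. The greedy strategy is only guaranteed optimal for
# canonical coin systems such as this one.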
# Driver Code
if __name__ == "__main__":
lowerCAmelCase_ : List[str] = []
lowerCAmelCase_ : Union[str, Any] = '0'
if (
input('Do you want to enter your denominations ? (yY/n): ').strip().lower()
== "y"
):
lowerCAmelCase_ : str = int(input('Enter the number of denominations you want to add: ').strip())
for i in range(0, n):
denominations.append(int(input(f"Denomination {i}: ").strip()))
lowerCAmelCase_ : Union[str, Any] = input('Enter the change you want to make in Indian Currency: ').strip()
else:
# All denominations of Indian Currency if user does not enter
lowerCAmelCase_ : List[Any] = [1, 2, 5, 10, 20, 50, 100, 500, 2000]
lowerCAmelCase_ : str = input('Enter the change you want to make: ').strip()
if int(value) == 0 or int(value) < 0:
print('The total value cannot be zero or negative.')
else:
print(f"Following is minimal change for {value}: ")
lowerCAmelCase_ : Optional[int] = find_minimum_change(denominations, value)
# Print result
for i in range(len(answer)):
print(answer[i], end=' ')
| 442 | 0 |
def _lowerCamelCase ( lowerCamelCase_ : int ):
"""simple docstring"""
if number > 0:
raise ValueError('input must be a negative integer' )
UpperCAmelCase_ : Optional[int] = len(bin(lowerCamelCase_ )[3:] )
UpperCAmelCase_ : int = bin(abs(lowerCamelCase_ ) - (1 << binary_number_length) )[3:]
UpperCAmelCase_ : Dict = (
(
'1'
+ '0' * (binary_number_length - len(lowerCamelCase_ ))
+ twos_complement_number
)
if number < 0
else '0'
)
return "0b" + twos_complement_number
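# Worked example: for number = -5, bin(-5) == '-0b101' so binary_number_length == 3;
# abs(-5) - (1 << 3) == -3 and bin(-3)[3:] == '11'; prepending '1' plus the zero
# padding yields '1011', the 4-bit two's complement of -5, returned as '0b1011'.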
if __name__ == "__main__":
import doctest
doctest.testmod()
| 706 |
'''simple docstring'''
import argparse
import pathlib
import fairseq
import torch
from fairseq.models.roberta import RobertaModel as FairseqRobertaModel
from fairseq.modules import TransformerSentenceEncoderLayer
from packaging import version
from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.models.roberta.modeling_roberta import RobertaAttention
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse('''1.0.0a'''):
raise Exception('''requires fairseq >= 1.0.0a''')
logging.set_verbosity_info()
snake_case__ : Tuple = logging.get_logger(__name__)
snake_case__ : Dict = '''Hello world! cécé herlolip'''
def _lowerCamelCase ( lowerCamelCase_ : str , lowerCamelCase_ : str , lowerCamelCase_ : bool ):
"""simple docstring"""
UpperCAmelCase_ : Optional[int] = FairseqRobertaModel.from_pretrained(lowerCamelCase_ )
roberta.eval() # disable dropout
UpperCAmelCase_ : Optional[Any] = roberta.model.encoder.sentence_encoder
UpperCAmelCase_ : str = XLMRobertaConfig(
vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings , hidden_size=roberta.cfg.model.encoder_embed_dim , num_hidden_layers=roberta.cfg.model.encoder_layers , num_attention_heads=roberta.cfg.model.encoder_attention_heads , intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=514 , type_vocab_size=1 , layer_norm_eps=1e-5 , )
if classification_head:
UpperCAmelCase_ : Optional[int] = roberta.model.classification_heads['mnli'].out_proj.weight.shape[0]
print('Our RoBERTa config:' , lowerCamelCase_ )
UpperCAmelCase_ : str = XLMRobertaXLForSequenceClassification(lowerCamelCase_ ) if classification_head else XLMRobertaXLForMaskedLM(lowerCamelCase_ )
model.eval()
# Now let's copy all the weights.
# Embeddings
UpperCAmelCase_ : List[Any] = roberta_sent_encoder.embed_tokens.weight
UpperCAmelCase_ : Union[str, Any] = roberta_sent_encoder.embed_positions.weight
UpperCAmelCase_ : Tuple = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c RoBERTa doesn't use them.
UpperCAmelCase_ : int = roberta_sent_encoder.layer_norm.weight
UpperCAmelCase_ : Optional[Any] = roberta_sent_encoder.layer_norm.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
UpperCAmelCase_ : BertLayer = model.roberta.encoder.layer[i]
UpperCAmelCase_ : TransformerSentenceEncoderLayer = roberta_sent_encoder.layers[i]
UpperCAmelCase_ : RobertaAttention = layer.attention
UpperCAmelCase_ : str = roberta_layer.self_attn_layer_norm.weight
UpperCAmelCase_ : Tuple = roberta_layer.self_attn_layer_norm.bias
# self attention
UpperCAmelCase_ : BertSelfAttention = layer.attention.self
assert (
roberta_layer.self_attn.k_proj.weight.data.shape
== roberta_layer.self_attn.q_proj.weight.data.shape
== roberta_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
)
UpperCAmelCase_ : List[Any] = roberta_layer.self_attn.q_proj.weight
UpperCAmelCase_ : str = roberta_layer.self_attn.q_proj.bias
UpperCAmelCase_ : List[str] = roberta_layer.self_attn.k_proj.weight
UpperCAmelCase_ : Any = roberta_layer.self_attn.k_proj.bias
UpperCAmelCase_ : List[Any] = roberta_layer.self_attn.v_proj.weight
UpperCAmelCase_ : List[Any] = roberta_layer.self_attn.v_proj.bias
# self-attention output
UpperCAmelCase_ : BertSelfOutput = layer.attention.output
assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape
UpperCAmelCase_ : Tuple = roberta_layer.self_attn.out_proj.weight
UpperCAmelCase_ : str = roberta_layer.self_attn.out_proj.bias
# this one is final layer norm
UpperCAmelCase_ : Any = roberta_layer.final_layer_norm.weight
UpperCAmelCase_ : int = roberta_layer.final_layer_norm.bias
# intermediate
UpperCAmelCase_ : BertIntermediate = layer.intermediate
assert intermediate.dense.weight.shape == roberta_layer.fca.weight.shape
UpperCAmelCase_ : Any = roberta_layer.fca.weight
UpperCAmelCase_ : Optional[int] = roberta_layer.fca.bias
# output
UpperCAmelCase_ : BertOutput = layer.output
assert bert_output.dense.weight.shape == roberta_layer.fca.weight.shape
UpperCAmelCase_ : Any = roberta_layer.fca.weight
UpperCAmelCase_ : Optional[int] = roberta_layer.fca.bias
# end of layer
if classification_head:
UpperCAmelCase_ : Dict = roberta.model.classification_heads['mnli'].dense.weight
UpperCAmelCase_ : Dict = roberta.model.classification_heads['mnli'].dense.bias
UpperCAmelCase_ : List[str] = roberta.model.classification_heads['mnli'].out_proj.weight
UpperCAmelCase_ : Dict = roberta.model.classification_heads['mnli'].out_proj.bias
else:
# LM Head
UpperCAmelCase_ : Optional[Any] = roberta.model.encoder.lm_head.dense.weight
UpperCAmelCase_ : Optional[int] = roberta.model.encoder.lm_head.dense.bias
UpperCAmelCase_ : Tuple = roberta.model.encoder.lm_head.layer_norm.weight
UpperCAmelCase_ : Optional[Any] = roberta.model.encoder.lm_head.layer_norm.bias
UpperCAmelCase_ : Dict = roberta.model.encoder.lm_head.weight
UpperCAmelCase_ : List[Any] = roberta.model.encoder.lm_head.bias
# Let's check that we get the same results.
UpperCAmelCase_ : torch.Tensor = roberta.encode(lowerCamelCase_ ).unsqueeze(0 ) # batch of size 1
UpperCAmelCase_ : Any = model(lowerCamelCase_ )[0]
if classification_head:
UpperCAmelCase_ : Tuple = roberta.model.classification_heads['mnli'](roberta.extract_features(lowerCamelCase_ ) )
else:
UpperCAmelCase_ : Any = roberta.model(lowerCamelCase_ )[0]
print(our_output.shape , their_output.shape )
UpperCAmelCase_ : List[str] = torch.max(torch.abs(our_output - their_output ) ).item()
print(F'''max_absolute_diff = {max_absolute_diff}''' ) # ~ 1e-7
UpperCAmelCase_ : List[Any] = torch.allclose(lowerCamelCase_ , lowerCamelCase_ , atol=1e-3 )
print('Do both models output the same tensors?' , '🔥' if success else '💩' )
if not success:
raise Exception('Something went wRoNg' )
pathlib.Path(lowerCamelCase_ ).mkdir(parents=lowerCamelCase_ , exist_ok=lowerCamelCase_ )
print(F'''Saving model to {pytorch_dump_folder_path}''' )
model.save_pretrained(lowerCamelCase_ )
if __name__ == "__main__":
snake_case__ : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--roberta_checkpoint_path''', default=None, type=str, required=True, help='''Path the official PyTorch dump.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--classification_head''', action='''store_true''', help='''Whether to convert a final classification head.'''
)
snake_case__ : List[str] = parser.parse_args()
convert_xlm_roberta_xl_checkpoint_to_pytorch(
args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
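# A hypothetical invocation of this conversion script (paths are placeholders):
#   python convert_xlm_roberta_xl.py --roberta_checkpoint_path /path/to/fairseq_ckpt \
#       --pytorch_dump_folder_path ./xlm-roberta-xl [--classification_head]
# The atol=1e-3 tolerance in the torch.allclose check above absorbs small floating
# point accumulation differences between the fairseq and transformers forward passes.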
| 389 | 0 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
snake_case = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE ( lowerCAmelCase ):
'''simple docstring'''
UpperCamelCase_ : List[Any] = ['''pixel_values''']
def __init__( self : Union[str, Any] , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : Dict[str, int] = None , UpperCAmelCase_ : PILImageResampling = PIL.Image.BICUBIC , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : Dict[str, int] = None , UpperCAmelCase_ : Union[int, float] = 1 / 255 , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : Optional[Union[float, List[float]]] = None , UpperCAmelCase_ : Optional[Union[float, List[float]]] = None , **UpperCAmelCase_ : List[str] , ):
super().__init__(**lowerCAmelCase__ )
SCREAMING_SNAKE_CASE : List[Any] = size if size is not None else {"height": 256, "width": 256}
SCREAMING_SNAKE_CASE : Optional[int] = get_size_dict(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE : int = crop_size if crop_size is not None else {"height": 224, "width": 224}
SCREAMING_SNAKE_CASE : Union[str, Any] = get_size_dict(lowerCAmelCase__ , param_name="crop_size" )
SCREAMING_SNAKE_CASE : Optional[int] = do_resize
SCREAMING_SNAKE_CASE : Any = size
SCREAMING_SNAKE_CASE : List[str] = resample
SCREAMING_SNAKE_CASE : Optional[int] = do_center_crop
SCREAMING_SNAKE_CASE : Dict = crop_size
SCREAMING_SNAKE_CASE : Tuple = do_rescale
SCREAMING_SNAKE_CASE : int = rescale_factor
SCREAMING_SNAKE_CASE : Optional[Any] = do_normalize
SCREAMING_SNAKE_CASE : Tuple = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
SCREAMING_SNAKE_CASE : Dict = image_std if image_std is not None else IMAGENET_STANDARD_STD
def _A ( self : int , UpperCAmelCase_ : np.ndarray , UpperCAmelCase_ : Dict[str, int] , UpperCAmelCase_ : PILImageResampling = PIL.Image.BICUBIC , UpperCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase_ : Optional[Any] , ):
SCREAMING_SNAKE_CASE : Any = get_size_dict(lowerCAmelCase__ )
if "height" not in size or "width" not in size:
raise ValueError(f'''The size dictionary must have keys \'height\' and \'width\'. Got {size.keys()}''' )
return resize(
lowerCAmelCase__ , size=(size["height"], size["width"]) , resample=lowerCAmelCase__ , data_format=lowerCAmelCase__ , **lowerCAmelCase__ )
def _A ( self : Optional[int] , UpperCAmelCase_ : np.ndarray , UpperCAmelCase_ : Dict[str, int] , UpperCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase_ : Tuple , ):
SCREAMING_SNAKE_CASE : Tuple = get_size_dict(lowerCAmelCase__ )
if "height" not in size or "width" not in size:
raise ValueError(f'''The size dictionary must have keys \'height\' and \'width\'. Got {size.keys()}''' )
return center_crop(lowerCAmelCase__ , size=(size["height"], size["width"]) , data_format=lowerCAmelCase__ , **lowerCAmelCase__ )
def _A ( self : Optional[Any] , UpperCAmelCase_ : np.ndarray , UpperCAmelCase_ : Union[int, float] , UpperCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase_ : Union[str, Any] , ):
return rescale(lowerCAmelCase__ , scale=lowerCAmelCase__ , data_format=lowerCAmelCase__ , **lowerCAmelCase__ )
def _A ( self : Tuple , UpperCAmelCase_ : np.ndarray , UpperCAmelCase_ : Union[float, List[float]] , UpperCAmelCase_ : Union[float, List[float]] , UpperCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase_ : List[Any] , ):
return normalize(lowerCAmelCase__ , mean=lowerCAmelCase__ , std=lowerCAmelCase__ , data_format=lowerCAmelCase__ , **lowerCAmelCase__ )
def _A ( self : Optional[Any] , UpperCAmelCase_ : ImageInput , UpperCAmelCase_ : bool = None , UpperCAmelCase_ : Dict[str, int] = None , UpperCAmelCase_ : List[Any]=None , UpperCAmelCase_ : bool = None , UpperCAmelCase_ : Dict[str, int] = None , UpperCAmelCase_ : bool = None , UpperCAmelCase_ : float = None , UpperCAmelCase_ : bool = None , UpperCAmelCase_ : Optional[Union[float, List[float]]] = None , UpperCAmelCase_ : Optional[Union[float, List[float]]] = None , UpperCAmelCase_ : Optional[Union[str, TensorType]] = None , UpperCAmelCase_ : ChannelDimension = ChannelDimension.FIRST , **UpperCAmelCase_ : Optional[Any] , ):
SCREAMING_SNAKE_CASE : List[str] = do_resize if do_resize is not None else self.do_resize
SCREAMING_SNAKE_CASE : str = resample if resample is not None else self.resample
SCREAMING_SNAKE_CASE : List[Any] = do_center_crop if do_center_crop is not None else self.do_center_crop
SCREAMING_SNAKE_CASE : List[str] = do_rescale if do_rescale is not None else self.do_rescale
SCREAMING_SNAKE_CASE : str = rescale_factor if rescale_factor is not None else self.rescale_factor
SCREAMING_SNAKE_CASE : Any = do_normalize if do_normalize is not None else self.do_normalize
SCREAMING_SNAKE_CASE : Optional[int] = image_mean if image_mean is not None else self.image_mean
SCREAMING_SNAKE_CASE : List[Any] = image_std if image_std is not None else self.image_std
SCREAMING_SNAKE_CASE : Optional[Any] = size if size is not None else self.size
SCREAMING_SNAKE_CASE : Optional[Any] = get_size_dict(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE : Optional[int] = crop_size if crop_size is not None else self.crop_size
SCREAMING_SNAKE_CASE : int = get_size_dict(lowerCAmelCase__ , param_name="crop_size" )
SCREAMING_SNAKE_CASE : Dict = make_list_of_images(lowerCAmelCase__ )
if not valid_images(lowerCAmelCase__ ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
        if do_resize and (size is None or resample is None):
raise ValueError("Size and resample must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# All transformations expect numpy arrays.
SCREAMING_SNAKE_CASE : List[Any] = [to_numpy_array(lowerCAmelCase__ ) for image in images]
if do_resize:
SCREAMING_SNAKE_CASE : Optional[int] = [self.resize(image=lowerCAmelCase__ , size=lowerCAmelCase__ , resample=lowerCAmelCase__ ) for image in images]
if do_center_crop:
SCREAMING_SNAKE_CASE : int = [self.center_crop(image=lowerCAmelCase__ , size=lowerCAmelCase__ ) for image in images]
if do_rescale:
SCREAMING_SNAKE_CASE : Optional[Any] = [self.rescale(image=lowerCAmelCase__ , scale=lowerCAmelCase__ ) for image in images]
if do_normalize:
SCREAMING_SNAKE_CASE : Optional[Any] = [self.normalize(image=lowerCAmelCase__ , mean=lowerCAmelCase__ , std=lowerCAmelCase__ ) for image in images]
SCREAMING_SNAKE_CASE : Union[str, Any] = [to_channel_dimension_format(lowerCAmelCase__ , lowerCAmelCase__ ) for image in images]
SCREAMING_SNAKE_CASE : List[Any] = {"pixel_values": images}
return BatchFeature(data=lowerCAmelCase__ , tensor_type=lowerCAmelCase__ )
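# --- Illustrative sketch (not part of the original file): the preprocessing
# order the class above implements, spelled out with plain numpy so the effect
# of each step is visible. Values mirror the defaults configured above
# (224x224 crop, 1/255 rescale); mean=std=0.5 is assumed for the
# ImageNet-standard constants. `_sketch_preprocess` is a hypothetical helper.
import numpy as np

def _sketch_preprocess(image: np.ndarray) -> np.ndarray:
    # center-crop a 224x224 window (the initial resize step is omitted for brevity)
    h, w = image.shape[:2]
    top, left = (h - 224) // 2, (w - 224) // 2
    image = image[top : top + 224, left : left + 224]
    image = image * (1 / 255)        # rescale pixel values to [0, 1]
    image = (image - 0.5) / 0.5      # normalize with mean=std=0.5 per channel
    return image.transpose(2, 0, 1)  # HWC -> CHW (ChannelDimension.FIRST)

# _sketch_preprocess(np.zeros((256, 256, 3))).shape == (3, 224, 224)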
| 62 | '''simple docstring'''
import math
def __UpperCAmelCase ( a_: int ):
_UpperCAmelCase : Any = [True] * n
_UpperCAmelCase : Optional[Any] = False
_UpperCAmelCase : str = False
_UpperCAmelCase : int = True
for i in range(3, int(n**0.5 + 1 ), 2 ):
_UpperCAmelCase : Any = i * 2
while index < n:
_UpperCAmelCase : List[Any] = False
_UpperCAmelCase : int = index + i
_UpperCAmelCase : Optional[int] = [2]
for i in range(3, a_, 2 ):
if is_prime[i]:
primes.append(a_ )
return primes
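# Illustrative sanity check (not part of the original file). The sieve above is
# invoked as `prime_sieve` below; its intended output keeps 2 plus every odd
# prime below n, e.g. prime_sieve(30) -> [2, 3, 5, 7, 11, 13, 17, 19, 23, 29].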
def __UpperCAmelCase ( a_: int = 999_966_663_333 ):
_UpperCAmelCase : Tuple = math.floor(math.sqrt(a_ ) ) + 100
_UpperCAmelCase : Union[str, Any] = prime_sieve(a_ )
_UpperCAmelCase : Optional[int] = 0
_UpperCAmelCase : Tuple = 0
_UpperCAmelCase : int = primes[prime_index]
while (last_prime**2) <= limit:
_UpperCAmelCase : List[str] = primes[prime_index + 1]
_UpperCAmelCase : Any = last_prime**2
_UpperCAmelCase : Optional[Any] = next_prime**2
# Get numbers divisible by lps(current)
_UpperCAmelCase : List[str] = lower_bound + last_prime
while upper_bound > current <= limit:
matches_sum += current
current += last_prime
# Reset the upper_bound
while (upper_bound - next_prime) > limit:
upper_bound -= next_prime
# Add the numbers divisible by ups(current)
_UpperCAmelCase : Dict = upper_bound - next_prime
while current > lower_bound:
matches_sum += current
current -= next_prime
# Remove the numbers divisible by both ups and lps
_UpperCAmelCase : Dict = 0
while upper_bound > current <= limit:
if current <= lower_bound:
# Increment the current number
current += last_prime * next_prime
continue
if current > limit:
break
# Remove twice since it was added by both ups and lps
matches_sum -= current * 2
# Increment the current number
current += last_prime * next_prime
# Setup for next pair
_UpperCAmelCase : Union[str, Any] = next_prime
prime_index += 1
return matches_sum
if __name__ == "__main__":
print(solution()) | 494 | 0 |
import itertools
from dataclasses import dataclass
from typing import List, Optional
import pyarrow as pa
import pyarrow.parquet as pq
import datasets
from datasets.table import table_cast
a_ : str = datasets.utils.logging.get_logger(__name__)
@dataclass
class _snake_case ( datasets.BuilderConfig ):
_lowercase : int = 1_00_00
_lowercase : Optional[List[str]] = None
_lowercase : Optional[datasets.Features] = None
class _snake_case ( datasets.ArrowBasedBuilder ):
_lowercase : Optional[int] = ParquetConfig
def SCREAMING_SNAKE_CASE__ ( self) -> int:
return datasets.DatasetInfo(features=self.config.features)
def SCREAMING_SNAKE_CASE__ ( self , a) -> Optional[Any]:
if not self.config.data_files:
raise ValueError(f'''At least one data file must be specified, but got data_files={self.config.data_files}''')
SCREAMING_SNAKE_CASE = dl_manager.download_and_extract(self.config.data_files)
if isinstance(a , (str, list, tuple)):
SCREAMING_SNAKE_CASE = data_files
if isinstance(a , a):
SCREAMING_SNAKE_CASE = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
SCREAMING_SNAKE_CASE = [dl_manager.iter_files(a) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'files': files})]
SCREAMING_SNAKE_CASE = []
for split_name, files in data_files.items():
if isinstance(a , a):
SCREAMING_SNAKE_CASE = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
SCREAMING_SNAKE_CASE = [dl_manager.iter_files(a) for file in files]
            # Infer features if they are stored in the arrow schema
if self.info.features is None:
for file in itertools.chain.from_iterable(a):
with open(a , 'rb') as f:
SCREAMING_SNAKE_CASE = datasets.Features.from_arrow_schema(pq.read_schema(a))
break
splits.append(datasets.SplitGenerator(name=a , gen_kwargs={'files': files}))
return splits
def SCREAMING_SNAKE_CASE__ ( self , a) -> pa.Table:
if self.info.features is not None:
# more expensive cast to support nested features with keys in a different order
# allows str <-> int/float or str to Audio for example
SCREAMING_SNAKE_CASE = table_cast(a , self.info.features.arrow_schema)
return pa_table
def SCREAMING_SNAKE_CASE__ ( self , a) -> Dict:
SCREAMING_SNAKE_CASE = self.info.features.arrow_schema if self.info.features is not None else None
if self.info.features is not None and self.config.columns is not None:
if sorted(field.name for field in schema) != sorted(self.config.columns):
raise ValueError(
f'''Tried to load parquet data with columns \'{self.config.columns}\' with mismatching features \'{self.info.features}\'''')
for file_idx, file in enumerate(itertools.chain.from_iterable(a)):
with open(a , 'rb') as f:
SCREAMING_SNAKE_CASE = pq.ParquetFile(a)
try:
for batch_idx, record_batch in enumerate(
parquet_file.iter_batches(batch_size=self.config.batch_size , columns=self.config.columns)):
SCREAMING_SNAKE_CASE = pa.Table.from_batches([record_batch])
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield f'''{file_idx}_{batch_idx}''', self._cast_table(a)
except ValueError as e:
logger.error(f'''Failed to read file \'{file}\' with error {type(a)}: {e}''')
raise
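# --- Illustrative sketch (not part of the original file): the batched-read
# pattern `_generate_tables` above relies on, using only the public pyarrow
# APIs already imported in this module. Reading in record batches keeps peak
# memory bounded by `batch_size` rows instead of the whole file.
# `_sketch_iter_parquet` is a hypothetical helper.
def _sketch_iter_parquet(path, batch_size=10_000):
    parquet_file = pq.ParquetFile(path)
    for record_batch in parquet_file.iter_batches(batch_size=batch_size):
        # each chunk is materialized as a small Table, mirroring the builder
        yield pa.Table.from_batches([record_batch])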
| 444 |
from abc import ABC, abstractmethod
from typing import List, Optional
class _snake_case ( A__ ):
def __init__( self) -> int:
# test for the above condition
self.test()
def SCREAMING_SNAKE_CASE__ ( self) -> Tuple:
SCREAMING_SNAKE_CASE = 0
SCREAMING_SNAKE_CASE = False
while not completed:
if counter == 1:
self.reset()
SCREAMING_SNAKE_CASE = self.advance()
if not self.does_advance(a):
raise Exception(
'Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true.')
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.update(a)
counter += 1
if counter > 1_0000:
raise Exception('update() does not fulfill the constraint.')
if self.remaining() != 0:
raise Exception('Custom Constraint is not defined correctly.')
@abstractmethod
def SCREAMING_SNAKE_CASE__ ( self) -> Optional[int]:
raise NotImplementedError(
f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''')
@abstractmethod
def SCREAMING_SNAKE_CASE__ ( self , a) -> List[Any]:
raise NotImplementedError(
f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''')
@abstractmethod
def SCREAMING_SNAKE_CASE__ ( self , a) -> Dict:
raise NotImplementedError(
f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''')
@abstractmethod
def SCREAMING_SNAKE_CASE__ ( self) -> Optional[int]:
raise NotImplementedError(
f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''')
@abstractmethod
def SCREAMING_SNAKE_CASE__ ( self) -> Dict:
raise NotImplementedError(
f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''')
@abstractmethod
def SCREAMING_SNAKE_CASE__ ( self , a=False) -> int:
raise NotImplementedError(
f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''')
class _snake_case ( A__ ):
def __init__( self , a) -> Dict:
super(a , self).__init__()
if not isinstance(a , a) or len(a) == 0:
raise ValueError(f'''`token_ids` has to be a non-empty list, but is {token_ids}.''')
if any((not isinstance(a , a) or token_id < 0) for token_id in token_ids):
raise ValueError(f'''Each list in `token_ids` has to be a list of positive integers, but is {token_ids}.''')
SCREAMING_SNAKE_CASE = token_ids
SCREAMING_SNAKE_CASE = len(self.token_ids)
SCREAMING_SNAKE_CASE = -1 # the index of the currently fulfilled step
SCREAMING_SNAKE_CASE = False
def SCREAMING_SNAKE_CASE__ ( self) -> Optional[Any]:
if self.completed:
return None
return self.token_ids[self.fulfilled_idx + 1]
def SCREAMING_SNAKE_CASE__ ( self , a) -> Any:
if not isinstance(a , a):
raise ValueError(f'''`token_id` has to be an `int`, but is {token_id} of type {type(a)}''')
if self.completed:
return False
return token_id == self.token_ids[self.fulfilled_idx + 1]
def SCREAMING_SNAKE_CASE__ ( self , a) -> Optional[int]:
if not isinstance(a , a):
raise ValueError(f'''`token_id` has to be an `int`, but is {token_id} of type {type(a)}''')
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
if self.does_advance(a):
self.fulfilled_idx += 1
SCREAMING_SNAKE_CASE = True
if self.fulfilled_idx == (self.seqlen - 1):
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = completed
else:
# failed to make progress.
SCREAMING_SNAKE_CASE = True
self.reset()
return stepped, completed, reset
def SCREAMING_SNAKE_CASE__ ( self) -> Any:
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = 0
def SCREAMING_SNAKE_CASE__ ( self) -> Any:
return self.seqlen - (self.fulfilled_idx + 1)
def SCREAMING_SNAKE_CASE__ ( self , a=False) -> int:
SCREAMING_SNAKE_CASE = PhrasalConstraint(self.token_ids)
if stateful:
SCREAMING_SNAKE_CASE = self.seqlen
SCREAMING_SNAKE_CASE = self.fulfilled_idx
SCREAMING_SNAKE_CASE = self.completed
return new_constraint
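# --- Illustrative sketch (not part of the original file). Assuming the class
# above is exposed as `PhrasalConstraint` (the name its own copy() uses), a
# forced phrase is tracked one token at a time through update():
#
#   constraint = PhrasalConstraint([5, 9, 2])
#   for token in [5, 9, 2]:
#       stepped, completed, reset = constraint.update(token)
#   completed  # True once the whole phrase has been generated in order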
class _snake_case :
def __init__( self , a , a=True) -> Tuple:
SCREAMING_SNAKE_CASE = max([len(a) for one in nested_token_ids])
SCREAMING_SNAKE_CASE = {}
for token_ids in nested_token_ids:
SCREAMING_SNAKE_CASE = root
for tidx, token_id in enumerate(a):
if token_id not in level:
SCREAMING_SNAKE_CASE = {}
SCREAMING_SNAKE_CASE = level[token_id]
if no_subsets and self.has_subsets(a , a):
raise ValueError(
'Each list in `nested_token_ids` can\'t be a complete subset of another list, but is'
f''' {nested_token_ids}.''')
SCREAMING_SNAKE_CASE = root
def SCREAMING_SNAKE_CASE__ ( self , a) -> Union[str, Any]:
SCREAMING_SNAKE_CASE = self.trie
for current_token in current_seq:
SCREAMING_SNAKE_CASE = start[current_token]
SCREAMING_SNAKE_CASE = list(start.keys())
return next_tokens
def SCREAMING_SNAKE_CASE__ ( self , a) -> int:
SCREAMING_SNAKE_CASE = self.next_tokens(a)
return len(a) == 0
def SCREAMING_SNAKE_CASE__ ( self , a) -> int:
SCREAMING_SNAKE_CASE = list(root.values())
if len(a) == 0:
return 1
else:
return sum([self.count_leaves(a) for nn in next_nodes])
def SCREAMING_SNAKE_CASE__ ( self , a , a) -> Optional[int]:
SCREAMING_SNAKE_CASE = self.count_leaves(a)
return len(a) != leaf_count
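# --- Illustrative sketch (not part of the original file). Under the name
# `DisjunctiveTrie` (which the constraint class below instantiates), the trie
# answers "which tokens keep at least one candidate phrase alive?":
#
#   trie = DisjunctiveTrie([[1, 2, 3], [1, 2, 4]])
#   trie.next_tokens([1, 2])       # -> [3, 4]: both phrases still reachable
#   trie.reached_leaf([1, 2, 3])   # -> True: a complete phrase was emitted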
class _snake_case ( A__ ):
def __init__( self , a) -> Dict:
super(a , self).__init__()
if not isinstance(a , a) or len(a) == 0:
raise ValueError(f'''`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.''')
if any(not isinstance(a , a) for token_ids in nested_token_ids):
raise ValueError(f'''`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.''')
if any(
any((not isinstance(a , a) or token_id < 0) for token_id in token_ids)
for token_ids in nested_token_ids):
raise ValueError(
f'''Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}.''')
SCREAMING_SNAKE_CASE = DisjunctiveTrie(a)
SCREAMING_SNAKE_CASE = nested_token_ids
SCREAMING_SNAKE_CASE = self.trie.max_height
SCREAMING_SNAKE_CASE = []
SCREAMING_SNAKE_CASE = False
def SCREAMING_SNAKE_CASE__ ( self) -> Optional[int]:
SCREAMING_SNAKE_CASE = self.trie.next_tokens(self.current_seq)
if len(a) == 0:
return None
else:
return token_list
def SCREAMING_SNAKE_CASE__ ( self , a) -> List[str]:
if not isinstance(a , a):
raise ValueError(f'''`token_id` is supposed to be type `int`, but is {token_id} of type {type(a)}''')
SCREAMING_SNAKE_CASE = self.trie.next_tokens(self.current_seq)
return token_id in next_tokens
def SCREAMING_SNAKE_CASE__ ( self , a) -> List[Any]:
if not isinstance(a , a):
raise ValueError(f'''`token_id` is supposed to be type `int`, but is {token_id} of type {type(a)}''')
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
if self.does_advance(a):
self.current_seq.append(a)
SCREAMING_SNAKE_CASE = True
else:
SCREAMING_SNAKE_CASE = True
self.reset()
SCREAMING_SNAKE_CASE = self.trie.reached_leaf(self.current_seq)
SCREAMING_SNAKE_CASE = completed
return stepped, completed, reset
def SCREAMING_SNAKE_CASE__ ( self) -> List[Any]:
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = []
def SCREAMING_SNAKE_CASE__ ( self) -> Dict:
if self.completed:
# since this can be completed without reaching max height
return 0
else:
return self.seqlen - len(self.current_seq)
def SCREAMING_SNAKE_CASE__ ( self , a=False) -> Union[str, Any]:
SCREAMING_SNAKE_CASE = DisjunctiveConstraint(self.token_ids)
if stateful:
SCREAMING_SNAKE_CASE = self.seqlen
SCREAMING_SNAKE_CASE = self.current_seq
SCREAMING_SNAKE_CASE = self.completed
return new_constraint
class _snake_case :
def __init__( self , a) -> Union[str, Any]:
SCREAMING_SNAKE_CASE = constraints
# max # of steps required to fulfill a given constraint
SCREAMING_SNAKE_CASE = max([c.seqlen for c in constraints])
SCREAMING_SNAKE_CASE = len(a)
SCREAMING_SNAKE_CASE = False
self.init_state()
def SCREAMING_SNAKE_CASE__ ( self) -> Optional[int]:
SCREAMING_SNAKE_CASE = []
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = [constraint.copy(stateful=a) for constraint in self.constraints]
def SCREAMING_SNAKE_CASE__ ( self) -> Union[str, Any]:
SCREAMING_SNAKE_CASE = 0
if self.inprogress_constraint:
# extra points for having a constraint mid-fulfilled
add += self.max_seqlen - self.inprogress_constraint.remaining()
return (len(self.complete_constraints) * self.max_seqlen) + add
def SCREAMING_SNAKE_CASE__ ( self) -> Union[str, Any]:
SCREAMING_SNAKE_CASE = []
if self.inprogress_constraint is None:
for constraint in self.pending_constraints: # "pending" == "unfulfilled yet"
SCREAMING_SNAKE_CASE = constraint.advance()
if isinstance(a , a):
token_list.append(a)
elif isinstance(a , a):
token_list.extend(a)
else:
SCREAMING_SNAKE_CASE = self.inprogress_constraint.advance()
if isinstance(a , a):
token_list.append(a)
elif isinstance(a , a):
token_list.extend(a)
if len(a) == 0:
return None
else:
return token_list
def SCREAMING_SNAKE_CASE__ ( self , a) -> int:
self.init_state()
if token_ids is not None:
for token in token_ids:
# completes or steps **one** constraint
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.add(a)
# the entire list of constraints are fulfilled
if self.completed:
break
def SCREAMING_SNAKE_CASE__ ( self , a) -> Union[str, Any]:
if not isinstance(a , a):
raise ValueError(f'''`token_id` should be an `int`, but is `{token_id}`.''')
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = False, False
if self.completed:
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = False
return complete, stepped
if self.inprogress_constraint is not None:
            # In the middle of fulfilling a constraint. If the `token_id` *does* make incremental progress on the
            # current job, simply update the state
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.inprogress_constraint.update(a)
if reset:
# 1. If the next token breaks the progress, then we must restart.
# e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books".
                # But that doesn't mean we call self.init_state(), since we only reset the state for this particular
# constraint, not the full list of constraints.
self.pending_constraints.append(self.inprogress_constraint.copy(stateful=a))
SCREAMING_SNAKE_CASE = None
if complete:
# 2. If the next token completes the constraint, move it to completed list, set
# inprogress to None. If there are no pending constraints either, then this full list of constraints
# is complete.
self.complete_constraints.append(self.inprogress_constraint)
SCREAMING_SNAKE_CASE = None
if len(self.pending_constraints) == 0:
# we're done!
SCREAMING_SNAKE_CASE = True
else:
            # Not in the middle of fulfilling a constraint. So does this `token_id` help us step towards any of our
            # list of constraints?
for cidx, pending_constraint in enumerate(self.pending_constraints):
if pending_constraint.does_advance(a):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = pending_constraint.update(a)
if not stepped:
raise Exception(
'`constraint.update(token_id)` is not yielding incremental progress, '
'even though `constraint.does_advance(token_id)` is true.')
if complete:
self.complete_constraints.append(a)
SCREAMING_SNAKE_CASE = None
if not complete and stepped:
SCREAMING_SNAKE_CASE = pending_constraint
if complete or stepped:
# If we made any progress at all, then it's at least not a "pending constraint".
SCREAMING_SNAKE_CASE = (
self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :]
)
if len(self.pending_constraints) == 0 and self.inprogress_constraint is None:
# If there's no longer any pending after this and no inprogress either, then we must be
# complete.
SCREAMING_SNAKE_CASE = True
break # prevent accidentally stepping through multiple constraints with just one token.
return complete, stepped
def SCREAMING_SNAKE_CASE__ ( self , a=True) -> str:
        SCREAMING_SNAKE_CASE = ConstraintListState(self.constraints) # we never actually touch self.constraints objects
        # throughout this process, so it's still in its initialization state.
if stateful:
SCREAMING_SNAKE_CASE = [
constraint.copy(stateful=a) for constraint in self.complete_constraints
]
if self.inprogress_constraint is not None:
SCREAMING_SNAKE_CASE = self.inprogress_constraint.copy(stateful=a)
SCREAMING_SNAKE_CASE = [constraint.copy() for constraint in self.pending_constraints]
return new_state
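# --- Illustrative sketch (not part of the original file). Assuming the class
# above is exposed as `ConstraintListState` (the name its own copy() uses),
# a beam-search loop feeds each sampled token to add() and reads progress back:
#
#   state = ConstraintListState([PhrasalConstraint([4, 5]), PhrasalConstraint([7])])
#   for token in [4, 5, 7]:
#       complete, stepped = state.add(token)
#   state.completed  # True: both constrained phrases were produced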
| 444 | 1 |
'''simple docstring'''
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _a ( __lowerCAmelCase : List[Any] , __lowerCAmelCase : Any ):
"""simple docstring"""
assert isinstance(__lowerCAmelCase , __lowerCAmelCase )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def _a ( __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : int , __lowerCAmelCase : int ):
"""simple docstring"""
snake_case__ : Union[str, Any] = tmp_path / '''cache'''
snake_case__ : Tuple = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
snake_case__ : Union[str, Any] = ParquetDatasetReader(__lowerCAmelCase , cache_dir=__lowerCAmelCase , keep_in_memory=__lowerCAmelCase ).read()
_check_parquet_dataset(__lowerCAmelCase , __lowerCAmelCase )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
] , )
def _a ( __lowerCAmelCase : Any , __lowerCAmelCase : List[str] , __lowerCAmelCase : Optional[Any] ):
"""simple docstring"""
snake_case__ : Any = tmp_path / '''cache'''
snake_case__ : Optional[int] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
snake_case__ : Dict = features.copy() if features else default_expected_features
snake_case__ : Any = (
Features({feature: Value(__lowerCAmelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
snake_case__ : Any = ParquetDatasetReader(__lowerCAmelCase , features=__lowerCAmelCase , cache_dir=__lowerCAmelCase ).read()
_check_parquet_dataset(__lowerCAmelCase , __lowerCAmelCase )
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def _a ( __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Tuple ):
"""simple docstring"""
snake_case__ : List[Any] = tmp_path / '''cache'''
snake_case__ : Tuple = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
snake_case__ : Optional[int] = ParquetDatasetReader(__lowerCAmelCase , cache_dir=__lowerCAmelCase , split=__lowerCAmelCase ).read()
_check_parquet_dataset(__lowerCAmelCase , __lowerCAmelCase )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize('''path_type''' , [str, list] )
def _a ( __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Dict ):
"""simple docstring"""
if issubclass(__lowerCAmelCase , __lowerCAmelCase ):
snake_case__ : Dict = parquet_path
elif issubclass(__lowerCAmelCase , __lowerCAmelCase ):
snake_case__ : List[Any] = [parquet_path]
snake_case__ : List[Any] = tmp_path / '''cache'''
snake_case__ : List[Any] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
snake_case__ : Optional[int] = ParquetDatasetReader(__lowerCAmelCase , cache_dir=__lowerCAmelCase ).read()
_check_parquet_dataset(__lowerCAmelCase , __lowerCAmelCase )
def _a ( __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Optional[Any]=("train",) ):
"""simple docstring"""
assert isinstance(__lowerCAmelCase , __lowerCAmelCase )
for split in splits:
snake_case__ : str = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def _a ( __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : str , __lowerCAmelCase : Dict ):
"""simple docstring"""
snake_case__ : Any = tmp_path / '''cache'''
snake_case__ : List[str] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
snake_case__ : Dict = ParquetDatasetReader(
{'''train''': parquet_path} , cache_dir=__lowerCAmelCase , keep_in_memory=__lowerCAmelCase ).read()
_check_parquet_datasetdict(__lowerCAmelCase , __lowerCAmelCase )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
] , )
def _a ( __lowerCAmelCase : Any , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : int ):
"""simple docstring"""
snake_case__ : Any = tmp_path / '''cache'''
snake_case__ : Any = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
snake_case__ : Optional[Any] = features.copy() if features else default_expected_features
snake_case__ : str = (
Features({feature: Value(__lowerCAmelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
snake_case__ : Optional[int] = ParquetDatasetReader({'''train''': parquet_path} , features=__lowerCAmelCase , cache_dir=__lowerCAmelCase ).read()
_check_parquet_datasetdict(__lowerCAmelCase , __lowerCAmelCase )
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def _a ( __lowerCAmelCase : List[str] , __lowerCAmelCase : Any , __lowerCAmelCase : int ):
"""simple docstring"""
if split:
snake_case__ : List[str] = {split: parquet_path}
else:
snake_case__ : Any = '''train'''
snake_case__ : Union[str, Any] = {'''train''': parquet_path, '''test''': parquet_path}
snake_case__ : Any = tmp_path / '''cache'''
snake_case__ : str = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
snake_case__ : Union[str, Any] = ParquetDatasetReader(__lowerCAmelCase , cache_dir=__lowerCAmelCase ).read()
_check_parquet_datasetdict(__lowerCAmelCase , __lowerCAmelCase , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def _a ( __lowerCAmelCase : Tuple , __lowerCAmelCase : Union[str, Any] ):
"""simple docstring"""
snake_case__ : Optional[int] = ParquetDatasetWriter(__lowerCAmelCase , tmp_path / '''foo.parquet''' )
assert writer.write() > 0
snake_case__ : str = pq.ParquetFile(tmp_path / '''foo.parquet''' )
snake_case__ : Union[str, Any] = pf.read()
assert dataset.data.table == output_table
def _a ( __lowerCAmelCase : str , __lowerCAmelCase : str ):
"""simple docstring"""
snake_case__ : Tuple = str(shared_datadir / '''test_image_rgb.jpg''' )
snake_case__ : List[Any] = {'''image''': [image_path]}
snake_case__ : Union[str, Any] = Features({'''image''': Image()} )
snake_case__ : Optional[Any] = Dataset.from_dict(__lowerCAmelCase , features=__lowerCAmelCase )
snake_case__ : int = ParquetDatasetWriter(__lowerCAmelCase , tmp_path / '''foo.parquet''' )
assert writer.write() > 0
snake_case__ : str = Dataset.from_parquet(str(tmp_path / '''foo.parquet''' ) )
assert dataset.features == reloaded_dataset.features
snake_case__ : Optional[Any] = ParquetDatasetReader(str(tmp_path / '''foo.parquet''' ) , streaming=__lowerCAmelCase ).read()
assert dataset.features == reloaded_iterable_dataset.features
@pytest.mark.parametrize(
'''feature, expected''' , [
(Features({'''foo''': Value('''int32''' )} ), None),
(Features({'''image''': Image(), '''foo''': Value('''int32''' )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
(Features({'''nested''': Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
] , )
def _a ( __lowerCAmelCase : List[str] , __lowerCAmelCase : Optional[Any] ):
"""simple docstring"""
assert get_writer_batch_size(__lowerCAmelCase ) == expected
| 347 |
'''simple docstring'''
import numpy as np
import torch
from torch.utils.data import DataLoader
from accelerate.utils.dataclasses import DistributedType
class a :
"""simple docstring"""
def __init__( self : Optional[Any] , snake_case_ : List[str]=2 , snake_case_ : Optional[int]=3 , snake_case_ : Union[str, Any]=6_4 , snake_case_ : Optional[Any]=None ):
'''simple docstring'''
snake_case__ : List[str] = np.random.default_rng(snake_case_ )
snake_case__ : int = length
snake_case__ : Tuple = rng.normal(size=(length,) ).astype(np.floataa )
snake_case__ : Optional[Any] = a * self.x + b + rng.normal(scale=0.1 , size=(length,) ).astype(np.floataa )
def __len__( self : List[Any] ):
'''simple docstring'''
return self.length
def __getitem__( self : List[str] , snake_case_ : int ):
'''simple docstring'''
return {"x": self.x[i], "y": self.y[i]}
class a ( torch.nn.Module ):
"""simple docstring"""
def __init__( self : int , snake_case_ : str=0 , snake_case_ : Optional[Any]=0 , snake_case_ : Tuple=False ):
'''simple docstring'''
super().__init__()
snake_case__ : List[str] = torch.nn.Parameter(torch.tensor([2, 3] ).float() )
snake_case__ : Any = torch.nn.Parameter(torch.tensor([2, 3] ).float() )
snake_case__ : int = True
def __magic_name__ ( self : int , snake_case_ : str=None ):
'''simple docstring'''
if self.first_batch:
print(F"""Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}""" )
snake_case__ : str = False
return x * self.a[0] + self.b[0]
class a ( torch.nn.Module ):
"""simple docstring"""
def __init__( self : List[str] , snake_case_ : Tuple=0 , snake_case_ : int=0 , snake_case_ : int=False ):
'''simple docstring'''
super().__init__()
snake_case__ : Tuple = torch.nn.Parameter(torch.tensor(snake_case_ ).float() )
snake_case__ : int = torch.nn.Parameter(torch.tensor(snake_case_ ).float() )
snake_case__ : Union[str, Any] = True
def __magic_name__ ( self : Union[str, Any] , snake_case_ : List[str]=None ):
'''simple docstring'''
if self.first_batch:
print(F"""Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}""" )
snake_case__ : List[Any] = False
return x * self.a + self.b
def _a ( __lowerCAmelCase : Any , __lowerCAmelCase : int = 16 ):
"""simple docstring"""
from datasets import load_dataset
from transformers import AutoTokenizer
snake_case__ : List[str] = AutoTokenizer.from_pretrained('''bert-base-cased''' )
snake_case__ : Optional[int] = {'''train''': '''tests/test_samples/MRPC/train.csv''', '''validation''': '''tests/test_samples/MRPC/dev.csv'''}
snake_case__ : List[Any] = load_dataset('''csv''' , data_files=__lowerCAmelCase )
snake_case__ : Union[str, Any] = datasets['''train'''].unique('''label''' )
snake_case__ : Optional[Any] = {v: i for i, v in enumerate(__lowerCAmelCase )}
def tokenize_function(__lowerCAmelCase : Optional[int] ):
# max_length=None => use the model max length (it's actually the default)
snake_case__ : Union[str, Any] = tokenizer(
examples['''sentence1'''] , examples['''sentence2'''] , truncation=__lowerCAmelCase , max_length=__lowerCAmelCase , padding='''max_length''' )
if "label" in examples:
snake_case__ : List[Any] = [label_to_id[l] for l in examples['''label''']]
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
snake_case__ : List[Any] = datasets.map(
__lowerCAmelCase , batched=__lowerCAmelCase , remove_columns=['''sentence1''', '''sentence2''', '''label'''] , )
def collate_fn(__lowerCAmelCase : Optional[Any] ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(__lowerCAmelCase , padding='''max_length''' , max_length=1_28 , return_tensors='''pt''' )
return tokenizer.pad(__lowerCAmelCase , padding='''longest''' , return_tensors='''pt''' )
# Instantiate dataloaders.
snake_case__ : str = DataLoader(tokenized_datasets['''train'''] , shuffle=__lowerCAmelCase , collate_fn=__lowerCAmelCase , batch_size=2 )
snake_case__ : List[Any] = DataLoader(tokenized_datasets['''validation'''] , shuffle=__lowerCAmelCase , collate_fn=__lowerCAmelCase , batch_size=1 )
return train_dataloader, eval_dataloader
| 347 | 1 |
"""simple docstring"""
__SCREAMING_SNAKE_CASE ={
"Pillow": "Pillow",
"accelerate": "accelerate>=0.11.0",
"compel": "compel==0.1.8",
"black": "black~=23.1",
"datasets": "datasets",
"filelock": "filelock",
"flax": "flax>=0.4.1",
"hf-doc-builder": "hf-doc-builder>=0.3.0",
"huggingface-hub": "huggingface-hub>=0.13.2",
"requests-mock": "requests-mock==1.10.0",
"importlib_metadata": "importlib_metadata",
"invisible-watermark": "invisible-watermark",
"isort": "isort>=5.5.4",
"jax": "jax>=0.2.8,!=0.3.2",
"jaxlib": "jaxlib>=0.1.65",
"Jinja2": "Jinja2",
"k-diffusion": "k-diffusion>=0.0.12",
"torchsde": "torchsde",
"note_seq": "note_seq",
"librosa": "librosa",
"numpy": "numpy",
"omegaconf": "omegaconf",
"parameterized": "parameterized",
"protobuf": "protobuf>=3.20.3,<4",
"pytest": "pytest",
"pytest-timeout": "pytest-timeout",
"pytest-xdist": "pytest-xdist",
"ruff": "ruff>=0.0.241",
"safetensors": "safetensors",
"sentencepiece": "sentencepiece>=0.1.91,!=0.1.92",
"scipy": "scipy",
"onnx": "onnx",
"regex": "regex!=2019.12.17",
"requests": "requests",
"tensorboard": "tensorboard",
"torch": "torch>=1.4",
"torchvision": "torchvision",
"transformers": "transformers>=4.25.1",
"urllib3": "urllib3<=2.0.0",
}
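# --- Illustrative sketch (not part of the original file): a table like the one
# above is typically consumed by a small helper that turns package names into
# their pinned specifiers (e.g. for setup() extras). `_deps_list` is a
# hypothetical helper, not part of the original module.
def _deps_list(*pkgs):
    return [__SCREAMING_SNAKE_CASE[p] for p in pkgs]

# _deps_list("torch", "transformers") == ["torch>=1.4", "transformers>=4.25.1"]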
| 477 | """simple docstring"""
import numpy as np
def lowercase__( __SCREAMING_SNAKE_CASE : np.array ):
return 1 / (1 + np.exp(-vector ))
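# Illustrative check (not part of the original file): the function above is the
# elementwise logistic sigmoid, so e.g.
# lowercase__(np.array([-1.0, 0.0, 1.0])) -> approximately [0.2689, 0.5, 0.7311]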
if __name__ == "__main__":
import doctest
doctest.testmod()
| 477 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
UpperCamelCase = {
'configuration_biogpt': ['BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BioGptConfig'],
'tokenization_biogpt': ['BioGptTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
'BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST',
'BioGptForCausalLM',
'BioGptForTokenClassification',
'BioGptForSequenceClassification',
'BioGptModel',
'BioGptPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
from .tokenization_biogpt import BioGptTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_biogpt import (
BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptPreTrainedModel,
)
else:
import sys
UpperCamelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 61 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_mbart import MBartTokenizer
else:
snake_case_ : Optional[Any] = None
snake_case_ : Tuple = logging.get_logger(__name__)
snake_case_ : str = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
snake_case_ : int = {
"vocab_file": {
"facebook/mbart-large-en-ro": (
"https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model"
),
"facebook/mbart-large-cc25": (
"https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model"
),
},
"tokenizer_file": {
"facebook/mbart-large-en-ro": "https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json",
"facebook/mbart-large-cc25": "https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json",
},
}
snake_case_ : Optional[Any] = {
"facebook/mbart-large-en-ro": 1024,
"facebook/mbart-large-cc25": 1024,
}
# fmt: off
snake_case_ : Any = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN"]
class snake_case_ ( __A ):
'''simple docstring'''
lowerCamelCase = VOCAB_FILES_NAMES
lowerCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase = ["input_ids", "attention_mask"]
lowerCamelCase = MBartTokenizer
lowerCamelCase = []
lowerCamelCase = []
def __init__( self : Optional[Any] , __magic_name__ : Tuple=None , __magic_name__ : int=None , __magic_name__ : Dict="<s>" , __magic_name__ : int="</s>" , __magic_name__ : List[str]="</s>" , __magic_name__ : List[Any]="<s>" , __magic_name__ : Optional[int]="<unk>" , __magic_name__ : Any="<pad>" , __magic_name__ : Any="<mask>" , __magic_name__ : Optional[Any]=None , __magic_name__ : List[Any]=None , __magic_name__ : str=None , **__magic_name__ : List[str] , ) -> str:
# Mask token behave like a normal word, i.e. include the space before it
lowerCamelCase_ : Dict = AddedToken(__magic_name__ , lstrip=__magic_name__ , rstrip=__magic_name__ ) if isinstance(__magic_name__ , __magic_name__ ) else mask_token
super().__init__(
vocab_file=__magic_name__ , tokenizer_file=__magic_name__ , bos_token=__magic_name__ , eos_token=__magic_name__ , sep_token=__magic_name__ , cls_token=__magic_name__ , unk_token=__magic_name__ , pad_token=__magic_name__ , mask_token=__magic_name__ , src_lang=__magic_name__ , tgt_lang=__magic_name__ , additional_special_tokens=__magic_name__ , **__magic_name__ , )
lowerCamelCase_ : List[str] = vocab_file
lowerCamelCase_ : Optional[Any] = False if not self.vocab_file else True
lowerCamelCase_ : List[str] = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({"additional_special_tokens": _additional_special_tokens} )
lowerCamelCase_ : str = {
lang_code: self.convert_tokens_to_ids(__magic_name__ ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
lowerCamelCase_ : Optional[int] = src_lang if src_lang is not None else "en_XX"
lowerCamelCase_ : Optional[Any] = self.convert_tokens_to_ids(self._src_lang )
lowerCamelCase_ : List[str] = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def __SCREAMING_SNAKE_CASE ( self : str ) -> str:
return self._src_lang
@src_lang.setter
def __SCREAMING_SNAKE_CASE ( self : Dict , __magic_name__ : str ) -> None:
lowerCamelCase_ : int = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def __SCREAMING_SNAKE_CASE ( self : Tuple , __magic_name__ : List[int] , __magic_name__ : Optional[List[int]] = None ) -> List[int]:
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def __SCREAMING_SNAKE_CASE ( self : Dict , __magic_name__ : List[int] , __magic_name__ : Optional[List[int]] = None ) -> List[int]:
lowerCamelCase_ : Any = [self.sep_token_id]
lowerCamelCase_ : List[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __SCREAMING_SNAKE_CASE ( self : Dict , __magic_name__ : Dict , __magic_name__ : str , __magic_name__ : Optional[str] , __magic_name__ : Optional[str] , **__magic_name__ : Any ) -> List[str]:
if src_lang is None or tgt_lang is None:
raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model" )
lowerCamelCase_ : Optional[Any] = src_lang
lowerCamelCase_ : Tuple = self(__magic_name__ , add_special_tokens=__magic_name__ , return_tensors=__magic_name__ , **__magic_name__ )
lowerCamelCase_ : Union[str, Any] = self.convert_tokens_to_ids(__magic_name__ )
lowerCamelCase_ : List[Any] = tgt_lang_id
return inputs
def __SCREAMING_SNAKE_CASE ( self : Dict , __magic_name__ : List[str] , __magic_name__ : str = "en_XX" , __magic_name__ : Optional[List[str]] = None , __magic_name__ : str = "ro_RO" , **__magic_name__ : Optional[int] , ) -> BatchEncoding:
lowerCamelCase_ : Optional[int] = src_lang
lowerCamelCase_ : Any = tgt_lang
return super().prepare_seqaseq_batch(__magic_name__ , __magic_name__ , **__magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> Optional[int]:
return self.set_src_lang_special_tokens(self.src_lang )
def __SCREAMING_SNAKE_CASE ( self : str ) -> Any:
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def __SCREAMING_SNAKE_CASE ( self : str , __magic_name__ : Tuple ) -> None:
lowerCamelCase_ : Union[str, Any] = self.convert_tokens_to_ids(__magic_name__ )
lowerCamelCase_ : Any = []
lowerCamelCase_ : str = [self.eos_token_id, self.cur_lang_code]
lowerCamelCase_ : List[Any] = self.convert_ids_to_tokens(self.prefix_tokens )
lowerCamelCase_ : int = self.convert_ids_to_tokens(self.suffix_tokens )
lowerCamelCase_ : str = processors.TemplateProcessing(
single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def __SCREAMING_SNAKE_CASE ( self : List[str] , __magic_name__ : str ) -> None:
lowerCamelCase_ : Dict = self.convert_tokens_to_ids(__magic_name__ )
lowerCamelCase_ : Any = []
lowerCamelCase_ : List[Any] = [self.eos_token_id, self.cur_lang_code]
lowerCamelCase_ : List[str] = self.convert_ids_to_tokens(self.prefix_tokens )
lowerCamelCase_ : List[Any] = self.convert_ids_to_tokens(self.suffix_tokens )
lowerCamelCase_ : int = processors.TemplateProcessing(
single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def __SCREAMING_SNAKE_CASE ( self : List[str] , __magic_name__ : str , __magic_name__ : Optional[str] = None ) -> Tuple[str]:
if not self.can_save_slow_tokenizer:
raise ValueError(
"Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
"tokenizer." )
if not os.path.isdir(__magic_name__ ):
logger.error(F"Vocabulary path ({save_directory}) should be a directory." )
return
lowerCamelCase_ : Optional[Any] = os.path.join(
__magic_name__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__magic_name__ ):
copyfile(self.vocab_file , __magic_name__ )
return (out_vocab_file,)
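# --- Illustrative sketch (not part of the original file). Upstream this class
# is `MBartTokenizerFast`; a typical translation setup selects the language
# pair via src_lang/tgt_lang, which drives the special-token templates set up
# in set_src_lang_special_tokens / set_tgt_lang_special_tokens above:
#
#   tok = MBartTokenizerFast.from_pretrained(
#       "facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO"
#   )
#   batch = tok("UN Chief Says There Is No Military Solution in Syria", return_tensors="pt")
#   # per the suffix tokens above, input_ids end with [eos_token_id, en_XX code]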
| 488 | 0 |
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all image processors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...image_processing_utils import ImageProcessingMixin
from ...utils import CONFIG_NAME, IMAGE_PROCESSOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
__lowerCAmelCase : str =logging.get_logger(__name__)
__lowerCAmelCase : Any =OrderedDict(
[
('align', 'EfficientNetImageProcessor'),
('beit', 'BeitImageProcessor'),
('bit', 'BitImageProcessor'),
('blip', 'BlipImageProcessor'),
('blip-2', 'BlipImageProcessor'),
('bridgetower', 'BridgeTowerImageProcessor'),
('chinese_clip', 'ChineseCLIPImageProcessor'),
('clip', 'CLIPImageProcessor'),
('clipseg', 'ViTImageProcessor'),
('conditional_detr', 'ConditionalDetrImageProcessor'),
('convnext', 'ConvNextImageProcessor'),
('convnextv2', 'ConvNextImageProcessor'),
('cvt', 'ConvNextImageProcessor'),
('data2vec-vision', 'BeitImageProcessor'),
('deformable_detr', 'DeformableDetrImageProcessor'),
('deit', 'DeiTImageProcessor'),
('deta', 'DetaImageProcessor'),
('detr', 'DetrImageProcessor'),
('dinat', 'ViTImageProcessor'),
('donut-swin', 'DonutImageProcessor'),
('dpt', 'DPTImageProcessor'),
('efficientformer', 'EfficientFormerImageProcessor'),
('efficientnet', 'EfficientNetImageProcessor'),
('flava', 'FlavaImageProcessor'),
('focalnet', 'BitImageProcessor'),
('git', 'CLIPImageProcessor'),
('glpn', 'GLPNImageProcessor'),
('groupvit', 'CLIPImageProcessor'),
('imagegpt', 'ImageGPTImageProcessor'),
('instructblip', 'BlipImageProcessor'),
('layoutlmv2', 'LayoutLMv2ImageProcessor'),
('layoutlmv3', 'LayoutLMv3ImageProcessor'),
('levit', 'LevitImageProcessor'),
('mask2former', 'Mask2FormerImageProcessor'),
('maskformer', 'MaskFormerImageProcessor'),
('mgp-str', 'ViTImageProcessor'),
('mobilenet_v1', 'MobileNetV1ImageProcessor'),
('mobilenet_v2', 'MobileNetV2ImageProcessor'),
('mobilevit', 'MobileViTImageProcessor'),
('mobilevitv2', 'MobileViTImageProcessor'),
('nat', 'ViTImageProcessor'),
('oneformer', 'OneFormerImageProcessor'),
('owlvit', 'OwlViTImageProcessor'),
('perceiver', 'PerceiverImageProcessor'),
('pix2struct', 'Pix2StructImageProcessor'),
('poolformer', 'PoolFormerImageProcessor'),
('regnet', 'ConvNextImageProcessor'),
('resnet', 'ConvNextImageProcessor'),
('sam', 'SamImageProcessor'),
('segformer', 'SegformerImageProcessor'),
('swiftformer', 'ViTImageProcessor'),
('swin', 'ViTImageProcessor'),
('swin2sr', 'Swin2SRImageProcessor'),
('swinv2', 'ViTImageProcessor'),
('table-transformer', 'DetrImageProcessor'),
('timesformer', 'VideoMAEImageProcessor'),
('tvlt', 'TvltImageProcessor'),
('upernet', 'SegformerImageProcessor'),
('van', 'ConvNextImageProcessor'),
('videomae', 'VideoMAEImageProcessor'),
('vilt', 'ViltImageProcessor'),
('vit', 'ViTImageProcessor'),
('vit_hybrid', 'ViTHybridImageProcessor'),
('vit_mae', 'ViTImageProcessor'),
('vit_msn', 'ViTImageProcessor'),
('xclip', 'CLIPImageProcessor'),
('yolos', 'YolosImageProcessor'),
]
)
__lowerCAmelCase : Any =_LazyAutoMapping(CONFIG_MAPPING_NAMES, IMAGE_PROCESSOR_MAPPING_NAMES)
def _UpperCamelCase ( lowercase__ ):
for module_name, extractors in IMAGE_PROCESSOR_MAPPING_NAMES.items():
if class_name in extractors:
__SCREAMING_SNAKE_CASE : str = model_type_to_module_name(lowercase__ )
__SCREAMING_SNAKE_CASE : Dict = importlib.import_module(F'''.{module_name}''' , '''transformers.models''' )
try:
return getattr(lowercase__ , lowercase__ )
except AttributeError:
continue
for _, extractor in IMAGE_PROCESSOR_MAPPING._extra_content.items():
if getattr(lowercase__ , '''__name__''' , lowercase__ ) == class_name:
return extractor
    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
__SCREAMING_SNAKE_CASE : Union[str, Any] = importlib.import_module('''transformers''' )
if hasattr(lowercase__ , lowercase__ ):
return getattr(lowercase__ , lowercase__ )
return None
def _UpperCamelCase ( lowercase__ , lowercase__ = None , lowercase__ = False , lowercase__ = False , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = False , **lowercase__ , ):
__SCREAMING_SNAKE_CASE : List[Any] = get_file_from_repo(
lowercase__ , lowercase__ , cache_dir=lowercase__ , force_download=lowercase__ , resume_download=lowercase__ , proxies=lowercase__ , use_auth_token=lowercase__ , revision=lowercase__ , local_files_only=lowercase__ , )
if resolved_config_file is None:
logger.info(
'''Could not locate the image processor configuration file, will try to use the model config instead.''' )
return {}
with open(lowercase__ , encoding='''utf-8''' ) as reader:
return json.load(lowercase__ )
class _lowercase :
'''simple docstring'''
def __init__( self :str ) -> List[Any]:
raise EnvironmentError(
'''AutoImageProcessor is designed to be instantiated '''
'''using the `AutoImageProcessor.from_pretrained(pretrained_model_name_or_path)` method.''' )
@classmethod
@replace_list_option_in_docstrings(lowerCAmelCase__ )
def __magic_name__( cls :Any , lowerCAmelCase__ :Any , **lowerCAmelCase__ :Union[str, Any] ) -> Any:
__SCREAMING_SNAKE_CASE : Dict = kwargs.pop('''config''' , lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : int = kwargs.pop('''trust_remote_code''' , lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Tuple = True
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Tuple = ImageProcessingMixin.get_image_processor_dict(lowerCAmelCase__ , **lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Union[str, Any] = config_dict.get('''image_processor_type''' , lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[str] = None
if "AutoImageProcessor" in config_dict.get('''auto_map''' , {} ):
__SCREAMING_SNAKE_CASE : Optional[int] = config_dict['''auto_map''']['''AutoImageProcessor''']
# If we still don't have the image processor class, check if we're loading from a previous feature extractor config
# and if so, infer the image processor class from there.
if image_processor_class is None and image_processor_auto_map is None:
__SCREAMING_SNAKE_CASE : Union[str, Any] = config_dict.pop('''feature_extractor_type''' , lowerCAmelCase__ )
if feature_extractor_class is not None:
logger.warning(
'''Could not find image processor class in the image processor config or the model config. Loading'''
''' based on pattern matching with the model\'s feature extractor configuration.''' )
__SCREAMING_SNAKE_CASE : int = feature_extractor_class.replace('''FeatureExtractor''' , '''ImageProcessor''' )
if "AutoFeatureExtractor" in config_dict.get('''auto_map''' , {} ):
__SCREAMING_SNAKE_CASE : Any = config_dict['''auto_map''']['''AutoFeatureExtractor''']
__SCREAMING_SNAKE_CASE : Optional[int] = feature_extractor_auto_map.replace('''FeatureExtractor''' , '''ImageProcessor''' )
logger.warning(
'''Could not find image processor auto map in the image processor config or the model config.'''
''' Loading based on pattern matching with the model\'s feature extractor configuration.''' )
# If we don't find the image processor class in the image processor config, let's try the model config.
if image_processor_class is None and image_processor_auto_map is None:
if not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
__SCREAMING_SNAKE_CASE : Union[str, Any] = AutoConfig.from_pretrained(lowerCAmelCase__ , **lowerCAmelCase__ )
# It could be in `config.image_processor_type``
__SCREAMING_SNAKE_CASE : List[Any] = getattr(lowerCAmelCase__ , '''image_processor_type''' , lowerCAmelCase__ )
if hasattr(lowerCAmelCase__ , '''auto_map''' ) and "AutoImageProcessor" in config.auto_map:
__SCREAMING_SNAKE_CASE : Union[str, Any] = config.auto_map['''AutoImageProcessor''']
if image_processor_class is not None:
__SCREAMING_SNAKE_CASE : str = image_processor_class_from_name(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Union[str, Any] = image_processor_auto_map is not None
__SCREAMING_SNAKE_CASE : Tuple = image_processor_class is not None or type(lowerCAmelCase__ ) in IMAGE_PROCESSOR_MAPPING
__SCREAMING_SNAKE_CASE : Any = resolve_trust_remote_code(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
if has_remote_code and trust_remote_code:
__SCREAMING_SNAKE_CASE : Tuple = get_class_from_dynamic_module(
lowerCAmelCase__ , lowerCAmelCase__ , **lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Union[str, Any] = kwargs.pop('''code_revision''' , lowerCAmelCase__ )
if os.path.isdir(lowerCAmelCase__ ):
image_processor_class.register_for_auto_class()
return image_processor_class.from_dict(lowerCAmelCase__ , **lowerCAmelCase__ )
elif image_processor_class is not None:
return image_processor_class.from_dict(lowerCAmelCase__ , **lowerCAmelCase__ )
# Last try: we use the IMAGE_PROCESSOR_MAPPING.
elif type(lowerCAmelCase__ ) in IMAGE_PROCESSOR_MAPPING:
__SCREAMING_SNAKE_CASE : Dict = IMAGE_PROCESSOR_MAPPING[type(lowerCAmelCase__ )]
return image_processor_class.from_dict(lowerCAmelCase__ , **lowerCAmelCase__ )
raise ValueError(
f'''Unrecognized image processor in {pretrained_model_name_or_path}. Should have a '''
f'''`image_processor_type` key in its {IMAGE_PROCESSOR_NAME} of {CONFIG_NAME}, or one of the following '''
f'''`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in IMAGE_PROCESSOR_MAPPING_NAMES.keys() )}''' )
@staticmethod
def __magic_name__( lowerCAmelCase__ :Optional[int] , lowerCAmelCase__ :str ) -> Optional[Any]:
IMAGE_PROCESSOR_MAPPING.register(lowerCAmelCase__ , lowerCAmelCase__ )
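# --- Illustrative sketch (not part of the original file). Upstream this class
# is `AutoImageProcessor`; from_pretrained() resolves the concrete processor in
# the order implemented above: an explicit image_processor_type, then a legacy
# feature_extractor_type, then the model-type mapping:
#
#   from transformers import AutoImageProcessor
#   processor = AutoImageProcessor.from_pretrained("microsoft/resnet-50")
#   type(processor).__name__  # 'ConvNextImageProcessor', via the mapping above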
| 260 |
import argparse
import json
import pickle
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
__lowerCAmelCase : Any =logging.get_logger(__name__)
def _UpperCamelCase ( lowercase__ ):
__SCREAMING_SNAKE_CASE : Optional[Any] = SwinConfig.from_pretrained(
'''microsoft/swin-tiny-patch4-window7-224''' , out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''] )
__SCREAMING_SNAKE_CASE : str = MaskFormerConfig(backbone_config=lowercase__ )
__SCREAMING_SNAKE_CASE : Dict = '''huggingface/label-files'''
if "ade20k-full" in model_name:
# this should be ok
__SCREAMING_SNAKE_CASE : Optional[Any] = 847
__SCREAMING_SNAKE_CASE : Optional[int] = '''maskformer-ade20k-full-id2label.json'''
elif "ade" in model_name:
# this should be ok
__SCREAMING_SNAKE_CASE : Dict = 150
__SCREAMING_SNAKE_CASE : Union[str, Any] = '''ade20k-id2label.json'''
elif "coco-stuff" in model_name:
# this should be ok
__SCREAMING_SNAKE_CASE : Union[str, Any] = 171
__SCREAMING_SNAKE_CASE : Tuple = '''maskformer-coco-stuff-id2label.json'''
elif "coco" in model_name:
# TODO
__SCREAMING_SNAKE_CASE : Dict = 133
__SCREAMING_SNAKE_CASE : Union[str, Any] = '''coco-panoptic-id2label.json'''
elif "cityscapes" in model_name:
# this should be ok
__SCREAMING_SNAKE_CASE : Optional[int] = 19
__SCREAMING_SNAKE_CASE : Optional[Any] = '''cityscapes-id2label.json'''
elif "vistas" in model_name:
# this should be ok
__SCREAMING_SNAKE_CASE : Tuple = 65
__SCREAMING_SNAKE_CASE : Optional[Any] = '''mapillary-vistas-id2label.json'''
__SCREAMING_SNAKE_CASE : List[str] = json.load(open(hf_hub_download(lowercase__ , lowercase__ , repo_type='''dataset''' ) , '''r''' ) )
__SCREAMING_SNAKE_CASE : Any = {int(lowercase__ ): v for k, v in idalabel.items()}
return config
def _UpperCamelCase ( lowercase__ ):
__SCREAMING_SNAKE_CASE : Any = []
# stem
# fmt: off
rename_keys.append(('''backbone.patch_embed.proj.weight''', '''model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight''') )
rename_keys.append(('''backbone.patch_embed.proj.bias''', '''model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias''') )
rename_keys.append(('''backbone.patch_embed.norm.weight''', '''model.pixel_level_module.encoder.model.embeddings.norm.weight''') )
rename_keys.append(('''backbone.patch_embed.norm.bias''', '''model.pixel_level_module.encoder.model.embeddings.norm.bias''') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.norm1.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.norm1.bias''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.attn.relative_position_index''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.attn.proj.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.attn.proj.bias''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.norm2.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.norm2.bias''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.mlp.fc1.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.mlp.fc1.bias''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.mlp.fc2.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.mlp.fc2.bias''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias''') )
if i < 3:
rename_keys.append((F'''backbone.layers.{i}.downsample.reduction.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight''') )
rename_keys.append((F'''backbone.layers.{i}.downsample.norm.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight''') )
rename_keys.append((F'''backbone.layers.{i}.downsample.norm.bias''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias''') )
rename_keys.append((F'''backbone.norm{i}.weight''', F'''model.pixel_level_module.encoder.hidden_states_norms.{i}.weight''') )
rename_keys.append((F'''backbone.norm{i}.bias''', F'''model.pixel_level_module.encoder.hidden_states_norms.{i}.bias''') )
# FPN
rename_keys.append(('''sem_seg_head.layer_4.weight''', '''model.pixel_level_module.decoder.fpn.stem.0.weight''') )
rename_keys.append(('''sem_seg_head.layer_4.norm.weight''', '''model.pixel_level_module.decoder.fpn.stem.1.weight''') )
rename_keys.append(('''sem_seg_head.layer_4.norm.bias''', '''model.pixel_level_module.decoder.fpn.stem.1.bias''') )
for source_index, target_index in zip(range(3 , 0 , -1 ) , range(0 , 3 ) ):
rename_keys.append((F'''sem_seg_head.adapter_{source_index}.weight''', F'''model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight''') )
rename_keys.append((F'''sem_seg_head.adapter_{source_index}.norm.weight''', F'''model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight''') )
rename_keys.append((F'''sem_seg_head.adapter_{source_index}.norm.bias''', F'''model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias''') )
rename_keys.append((F'''sem_seg_head.layer_{source_index}.weight''', F'''model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight''') )
rename_keys.append((F'''sem_seg_head.layer_{source_index}.norm.weight''', F'''model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight''') )
rename_keys.append((F'''sem_seg_head.layer_{source_index}.norm.bias''', F'''model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias''') )
rename_keys.append(('''sem_seg_head.mask_features.weight''', '''model.pixel_level_module.decoder.mask_projection.weight''') )
rename_keys.append(('''sem_seg_head.mask_features.bias''', '''model.pixel_level_module.decoder.mask_projection.bias''') )
# Transformer decoder
for idx in range(config.decoder_config.decoder_layers ):
# self-attention out projection
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight''', F'''model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight''') )
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias''', F'''model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias''') )
# cross-attention out projection
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight''', F'''model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight''') )
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias''', F'''model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias''') )
# MLP 1
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight''', F'''model.transformer_module.decoder.layers.{idx}.fc1.weight''') )
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias''', F'''model.transformer_module.decoder.layers.{idx}.fc1.bias''') )
# MLP 2
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight''', F'''model.transformer_module.decoder.layers.{idx}.fc2.weight''') )
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias''', F'''model.transformer_module.decoder.layers.{idx}.fc2.bias''') )
# layernorm 1 (self-attention layernorm)
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight''', F'''model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight''') )
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias''', F'''model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias''') )
# layernorm 2 (cross-attention layernorm)
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight''', F'''model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight''') )
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias''', F'''model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias''') )
# layernorm 3 (final layernorm)
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight''', F'''model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight''') )
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias''', F'''model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias''') )
rename_keys.append(('''sem_seg_head.predictor.transformer.decoder.norm.weight''', '''model.transformer_module.decoder.layernorm.weight''') )
rename_keys.append(('''sem_seg_head.predictor.transformer.decoder.norm.bias''', '''model.transformer_module.decoder.layernorm.bias''') )
# heads on top
rename_keys.append(('''sem_seg_head.predictor.query_embed.weight''', '''model.transformer_module.queries_embedder.weight''') )
rename_keys.append(('''sem_seg_head.predictor.input_proj.weight''', '''model.transformer_module.input_projection.weight''') )
rename_keys.append(('''sem_seg_head.predictor.input_proj.bias''', '''model.transformer_module.input_projection.bias''') )
rename_keys.append(('''sem_seg_head.predictor.class_embed.weight''', '''class_predictor.weight''') )
rename_keys.append(('''sem_seg_head.predictor.class_embed.bias''', '''class_predictor.bias''') )
for i in range(3 ):
rename_keys.append((F'''sem_seg_head.predictor.mask_embed.layers.{i}.weight''', F'''mask_embedder.{i}.0.weight''') )
rename_keys.append((F'''sem_seg_head.predictor.mask_embed.layers.{i}.bias''', F'''mask_embedder.{i}.0.bias''') )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
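
# A quick illustration of rename_key on a plain dict (the value is a placeholder):
#   d = {"backbone.patch_embed.norm.bias": 0}
#   rename_key(d, "backbone.patch_embed.norm.bias",
#              "model.pixel_level_module.encoder.model.embeddings.norm.bias")
#   # d == {"model.pixel_level_module.encoder.model.embeddings.norm.bias": 0}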
def read_in_swin_q_k_v(state_dict, backbone_config):
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f"backbone.layers.{i}.blocks.{j}.attn.qkv.weight")
            in_proj_bias = state_dict.pop(f"backbone.layers.{i}.blocks.{j}.attn.qkv.bias")
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"] = in_proj_weight[:dim, :]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"] = in_proj_bias[:dim]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"] = in_proj_weight[dim : dim * 2, :]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"] = in_proj_bias[dim : dim * 2]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"] = in_proj_weight[-dim:, :]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"] = in_proj_bias[-dim:]
            # fmt: on
# fmt: on
def read_in_decoder_q_k_v(state_dict, config):
    # fmt: off
    hidden_size = config.decoder_config.hidden_size
    for idx in range(config.decoder_config.decoder_layers):
        # read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.weight"] = in_proj_weight[:hidden_size, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.bias"] = in_proj_bias[:hidden_size]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.weight"] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.weight"] = in_proj_weight[-hidden_size:, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.bias"] = in_proj_bias[-hidden_size:]
        # read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.weight"] = in_proj_weight[:hidden_size, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.bias"] = in_proj_bias[:hidden_size]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.weight"] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.weight"] = in_proj_weight[-hidden_size:, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.bias"] = in_proj_bias[-hidden_size:]
    # fmt: on
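
# The slicing above relies on the fused projection layout: nn.MultiheadAttention
# stores in_proj_weight as the row-wise concatenation [W_q; W_k; W_v], each block
# of shape (hidden_size, hidden_size). A standalone sketch of the same split:
#
#   import torch
#   hidden_size = 4
#   in_proj_weight = torch.randn(3 * hidden_size, hidden_size)
#   w_q = in_proj_weight[:hidden_size, :]
#   w_k = in_proj_weight[hidden_size : hidden_size * 2, :]
#   w_v = in_proj_weight[-hidden_size:, :]
#   assert torch.equal(torch.cat([w_q, w_k, w_v], dim=0), in_proj_weight)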
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_maskformer_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    config = get_maskformer_config(model_name)

    # load original state_dict
    with open(checkpoint_path, "rb") as f:
        data = pickle.load(f)
    state_dict = data["model"]

    # for name, param in state_dict.items():
    #     print(name, param.shape)

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_swin_q_k_v(state_dict, config.backbone_config)
    read_in_decoder_q_k_v(state_dict, config)

    # update to torch tensors
    for key, value in state_dict.items():
        state_dict[key] = torch.from_numpy(value)

    # load 🤗 model
    model = MaskFormerForInstanceSegmentation(config)
    model.eval()

    for name, param in model.named_parameters():
        print(name, param.shape)

    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    assert missing_keys == [
        "model.pixel_level_module.encoder.model.layernorm.weight",
        "model.pixel_level_module.encoder.model.layernorm.bias",
    ]
    assert len(unexpected_keys) == 0, f"Unexpected keys: {unexpected_keys}"

    # verify results
    image = prepare_img()
    if "vistas" in model_name:
        ignore_index = 65
    elif "cityscapes" in model_name:
        ignore_index = 65535
    else:
        ignore_index = 255
    reduce_labels = True if "ade" in model_name else False
    image_processor = MaskFormerImageProcessor(ignore_index=ignore_index, reduce_labels=reduce_labels)

    inputs = image_processor(image, return_tensors="pt")
    outputs = model(**inputs)

    print("Logits:", outputs.class_queries_logits[0, :3, :3])

    if model_name == "maskformer-swin-tiny-ade":
        expected_logits = torch.tensor(
            [[3.6353, -4.4770, -2.6065], [0.5081, -4.2394, -3.5343], [2.1909, -5.0353, -1.9323]]
        )
        assert torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_logits, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model and image processor to {pytorch_dump_folder_path}")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing model and image processor to the hub...")
        model.push_to_hub(f"nielsr/{model_name}")
        image_processor.push_to_hub(f"nielsr/{model_name}")
if __name__ == "__main__":
__lowerCAmelCase : Dict =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='maskformer-swin-tiny-ade',
type=str,
help=('Name of the MaskFormer model you\'d like to convert',),
)
parser.add_argument(
'--checkpoint_path',
default='/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl',
type=str,
help='Path to the original state dict (.pth file).',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
__lowerCAmelCase : Optional[int] =parser.parse_args()
convert_maskformer_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
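
# Example invocation of the script above (the script filename and paths are illustrative):
#   python convert_maskformer_checkpoint.py \
#       --model_name maskformer-swin-tiny-ade \
#       --checkpoint_path /path/to/model.pkl \
#       --pytorch_dump_folder_path ./maskformer-swin-tiny-ade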
| 260 | 1 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''xlm-roberta-base''': '''https://huggingface.co/xlm-roberta-base/resolve/main/sentencepiece.bpe.model''',
'''xlm-roberta-large''': '''https://huggingface.co/xlm-roberta-large/resolve/main/sentencepiece.bpe.model''',
'''xlm-roberta-large-finetuned-conll02-dutch''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/sentencepiece.bpe.model'''
),
'''xlm-roberta-large-finetuned-conll02-spanish''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/sentencepiece.bpe.model'''
),
'''xlm-roberta-large-finetuned-conll03-english''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/sentencepiece.bpe.model'''
),
'''xlm-roberta-large-finetuned-conll03-german''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/sentencepiece.bpe.model'''
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''xlm-roberta-base''': 512,
'''xlm-roberta-large''': 512,
'''xlm-roberta-large-finetuned-conll02-dutch''': 512,
'''xlm-roberta-large-finetuned-conll02-spanish''': 512,
'''xlm-roberta-large-finetuned-conll03-english''': 512,
'''xlm-roberta-large-finetuned-conll03-german''': 512,
}
class XLMRobertaTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(self, vocab_file, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", sp_model_kwargs=None, **kwargs):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs,
        )

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file

        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'

        # Mimic fairseq token-to-id alignment for the first 4 token
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}

        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1

        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + self.fairseq_offset
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
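
        # Net effect of the alignment above: a SentencePiece id `spm_id` maps to
        # `spm_id + self.fairseq_offset` (e.g. "," has spm id 3 and token id 4),
        # ids 0-3 are pinned to <s>/<pad>/</s>/<unk>, and <mask> takes the last id.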
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
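
        # e.g. for a pair of sequences this produces the RoBERTa-style layout
        # <s> A </s></s> B </s> -- note the doubled separator between the segments.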
    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        return len(self.sp_model) + self.fairseq_offset + 1  # Add the <mask> token

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str):
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) in an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        """Converts an index (integer) in a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings for sub-words) in a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string
    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
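
# Minimal round-trip sketch for the tokenizer above (assumes a local SentencePiece
# model file; the path is illustrative):
#   tok = XLMRobertaTokenizer("sentencepiece.bpe.model")
#   ids = tok("Hello world")["input_ids"]
#   # ids[0] == 0 (<s>) and ids[-1] == 2 (</s>), per the fairseq alignment table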
| 431 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_lxmert import LxmertTokenizer
_A = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
_A = {
"""vocab_file""": {
"""unc-nlp/lxmert-base-uncased""": """https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/vocab.txt""",
},
"""tokenizer_file""": {
"""unc-nlp/lxmert-base-uncased""": (
"""https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/tokenizer.json"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""unc-nlp/lxmert-base-uncased""": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
"""unc-nlp/lxmert-base-uncased""": {"""do_lower_case""": True},
}
class LxmertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LxmertTokenizer
    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 258 | 0 |
'''simple docstring'''
def kinetic_energy(mass: float, velocity: float) -> float:
    if mass < 0:
        raise ValueError("The mass of a body cannot be negative")
    return 0.5 * mass * abs(velocity) * abs(velocity)
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
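
# Worked example for the formula above: a 10 kg body moving at 5 m/s in either
# direction carries 0.5 * 10 * 5**2 = 125 J (speed enters squared, so sign is moot).
#   kinetic_energy(10, 5)   # 125.0
#   kinetic_energy(10, -5)  # 125.0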
| 358 |
'''simple docstring'''
def valid_connection(graph: list[list[int]], next_ver: int, curr_ind: int, path: list[int]) -> bool:
    # 1. Validate that path exists between current and next vertices
    if graph[path[curr_ind - 1]][next_ver] == 0:
        return False

    # 2. Validate that next vertex is not already in path
    return not any(vertex == next_ver for vertex in path)


def util_hamilton_cycle(graph: list[list[int]], path: list[int], curr_ind: int) -> bool:
    # Base Case
    if curr_ind == len(graph):
        # return whether path exists between current and starting vertices
        return graph[path[curr_ind - 1]][path[0]] == 1

    # Recursive Step
    for next_ver in range(0, len(graph)):
        if valid_connection(graph, next_ver, curr_ind, path):
            # Insert current vertex into path as next transition
            path[curr_ind] = next_ver
            # Validate created path
            if util_hamilton_cycle(graph, path, curr_ind + 1):
                return True
            # Backtrack
            path[curr_ind] = -1
    return False


def hamilton_cycle(graph: list[list[int]], start_index: int = 0) -> list[int]:
    path = [-1] * (len(graph) + 1)
    # initialize start and end of path with starting index
    path[0] = path[-1] = start_index
    # evaluate and if we find answer return path either return empty array
    return path if util_hamilton_cycle(graph, path, 1) else []
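
# Worked example: a 5-vertex graph that contains the Hamiltonian cycle
# 0 -> 1 -> 2 -> 4 -> 3 -> 0 (1 marks an edge in the symmetric adjacency matrix).
if __name__ == "__main__":
    demo_graph = [
        [0, 1, 0, 1, 0],
        [1, 0, 1, 1, 1],
        [0, 1, 0, 0, 1],
        [1, 1, 0, 0, 1],
        [0, 1, 1, 1, 0],
    ]
    print(hamilton_cycle(demo_graph))  # [0, 1, 2, 4, 3, 0]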
| 358 | 1 |
__UpperCamelCase : int = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""
__UpperCamelCase : Optional[int] = [{"""type""": """code""", """content""": INSTALL_CONTENT}]
__UpperCamelCase : Tuple = {
"""{processor_class}""": """FakeProcessorClass""",
"""{model_class}""": """FakeModelClass""",
"""{object_class}""": """FakeObjectClass""",
}
| 80 |
'''simple docstring'''
import argparse
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration
INIT_COMMON = [
# tf -> hf
('/', '.'),
('layer_', 'layers.'),
('kernel', 'weight'),
('beta', 'bias'),
('gamma', 'weight'),
('pegasus', 'model'),
]
END_COMMON = [
('.output.dense', '.fc2'),
('intermediate.LayerNorm', 'final_layer_norm'),
('intermediate.dense', 'fc1'),
]
DECODER_PATTERNS = (
INIT_COMMON
+ [
('attention.self.LayerNorm', 'self_attn_layer_norm'),
('attention.output.dense', 'self_attn.out_proj'),
('attention.self', 'self_attn'),
('attention.encdec.LayerNorm', 'encoder_attn_layer_norm'),
('attention.encdec_output.dense', 'encoder_attn.out_proj'),
('attention.encdec', 'encoder_attn'),
('key', 'k_proj'),
('value', 'v_proj'),
('query', 'q_proj'),
('decoder.LayerNorm', 'decoder.layernorm_embedding'),
]
+ END_COMMON
)
REMAINING_PATTERNS = (
INIT_COMMON
+ [
('embeddings.word_embeddings', 'shared.weight'),
('embeddings.position_embeddings', 'embed_positions.weight'),
('attention.self.LayerNorm', 'self_attn_layer_norm'),
('attention.output.dense', 'self_attn.output'),
('attention.self', 'self_attn.self'),
('encoder.LayerNorm', 'encoder.layernorm_embedding'),
]
+ END_COMMON
)
KEYS_TO_IGNORE = [
'encdec/key/bias',
'encdec/query/bias',
'encdec/value/bias',
'self/key/bias',
'self/query/bias',
'self/value/bias',
'encdec_output/dense/bias',
'attention/output/dense/bias',
]
def rename_state_dict_key(k: str, patterns) -> str:
    for tf_name, hf_name in patterns:
        k = k.replace(tf_name, hf_name)
    return k
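
# Example trace of rename_state_dict_key with DECODER_PATTERNS (substitutions are
# applied in list order): starting from "pegasus/decoder/LayerNorm/beta"
#   '/' -> '.'           : "pegasus.decoder.LayerNorm.beta"
#   'beta' -> 'bias'     : "pegasus.decoder.LayerNorm.bias"
#   'pegasus' -> 'model' : "model.decoder.LayerNorm.bias"
#   'decoder.LayerNorm' -> 'decoder.layernorm_embedding'
#                        : "model.decoder.layernorm_embedding.bias"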
def convert_bigbird_pegasus(tf_weights: dict, config_update: dict) -> BigBirdPegasusForConditionalGeneration:
    cfg = BigBirdPegasusConfig(**config_update)
    torch_model = BigBirdPegasusForConditionalGeneration(cfg)
    state_dict = torch_model.state_dict()
    mapping = {}

    # separating decoder weights
    decoder_weights = {k: tf_weights[k] for k in tf_weights if k.startswith("pegasus/decoder")}
    remaining_weights = {k: tf_weights[k] for k in tf_weights if not k.startswith("pegasus/decoder")}

    for k, v in tqdm(decoder_weights.items(), "tf -> hf conversion"):
        conditions = [k.endswith(ending) for ending in KEYS_TO_IGNORE]
        if any(conditions):
            continue
        patterns = DECODER_PATTERNS
        new_k = rename_state_dict_key(k, patterns)
        if new_k not in state_dict:
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")
        if any(i in k for i in ["dense", "query", "key", "value"]):
            v = v.T
        mapping[new_k] = torch.from_numpy(v)
        assert v.shape == state_dict[new_k].shape, f"{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"

    for k, v in tqdm(remaining_weights.items(), "tf -> hf conversion"):
        conditions = [k.endswith(ending) for ending in KEYS_TO_IGNORE]
        if any(conditions):
            continue
        patterns = REMAINING_PATTERNS
        new_k = rename_state_dict_key(k, patterns)
        if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings":
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")
        if any(i in k for i in ["dense", "query", "key", "value"]):
            v = v.T
        mapping[new_k] = torch.from_numpy(v)
        if k != "pegasus/embeddings/position_embeddings":
            assert v.shape == state_dict[new_k].shape, f"{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"

    mapping["model.encoder.embed_positions.weight"] = mapping["model.embed_positions.weight"]
    mapping["model.decoder.embed_positions.weight"] = mapping.pop("model.embed_positions.weight")
    missing, extra = torch_model.load_state_dict(mapping, strict=False)
    unexpected_missing = [
        k
        for k in missing
        if k
        not in [
            "final_logits_bias",
            "model.encoder.embed_tokens.weight",
            "model.decoder.embed_tokens.weight",
            "lm_head.weight",
        ]
    ]
    assert unexpected_missing == [], f"no matches found for the following torch keys {unexpected_missing}"
    assert extra == [], f"no matches found for the following tf keys {extra}"
    return torch_model
def get_tf_weights_as_numpy(path) -> Dict:
    init_vars = tf.train.list_variables(path)
    tf_weights = {}
    ignore_name = ["global_step"]
    for name, shape in tqdm(init_vars, desc="converting tf checkpoint to dict"):
        skip_key = any(pat in name for pat in ignore_name)
        if skip_key:
            continue
        array = tf.train.load_variable(path, name)
        tf_weights[name] = array
    return tf_weights


def convert_bigbird_pegasus_ckpt_to_pytorch(ckpt_path: str, save_dir: str, config_update: dict):
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    torch_model = convert_bigbird_pegasus(tf_weights, config_update)
    torch_model.save_pretrained(save_dir)
if __name__ == "__main__":
lowerCAmelCase_ : str = argparse.ArgumentParser()
parser.add_argument('--tf_ckpt_path', type=str, help='passed to tf.train.list_variables')
parser.add_argument('--save_dir', default=None, type=str, help='Path to the output PyTorch model.')
lowerCAmelCase_ : Optional[Any] = parser.parse_args()
lowerCAmelCase_ : Optional[Any] = {}
convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
| 692 | 0 |
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFInpaintingSuperResolutionPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFInpaintingSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({"original_image"})
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_superresolution_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 16, 16), rng=random.Random(seed)).to(device)
        original_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "original_image": original_image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)
| 700 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
    "configuration_ctrl": ["CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP", "CTRLConfig"],
    "tokenization_ctrl": ["CTRLTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_ctrl"] = [
        "CTRL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CTRLForSequenceClassification",
        "CTRLLMHeadModel",
        "CTRLModel",
        "CTRLPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_ctrl"] = [
        "TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFCTRLForSequenceClassification",
        "TFCTRLLMHeadModel",
        "TFCTRLModel",
        "TFCTRLPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig
    from .tokenization_ctrl import CTRLTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_ctrl import (
            CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
            CTRLForSequenceClassification,
            CTRLLMHeadModel,
            CTRLModel,
            CTRLPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_ctrl import (
            TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFCTRLForSequenceClassification,
            TFCTRLLMHeadModel,
            TFCTRLModel,
            TFCTRLPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
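
# What the lazy indirection above buys you, as a minimal standalone sketch
# (illustrative only -- not the actual transformers._LazyModule implementation):
# attribute access triggers the real submodule import, so importing the package
# stays cheap until a class is first touched.
#
#   import importlib, types
#
#   class LazyDemo(types.ModuleType):
#       def __init__(self, name, import_structure):
#           super().__init__(name)
#           # map exported attribute -> submodule that defines it
#           self._attr_to_module = {
#               attr: mod for mod, attrs in import_structure.items() for attr in attrs
#           }
#
#       def __getattr__(self, attr):
#           module = importlib.import_module("." + self._attr_to_module[attr], self.__name__)
#           return getattr(module, attr)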
| 552 | 0 |
def largest_square_area_in_matrix_top_down_approach(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Plain recursion: explore every cell's sub-problems (exponential time)."""

    def update_area_of_max_square(row: int, col: int) -> int:
        # BASE CASE
        if row >= rows or col >= cols:
            return 0

        right = update_area_of_max_square(row, col + 1)
        diagonal = update_area_of_max_square(row + 1, col + 1)
        down = update_area_of_max_square(row + 1, col)

        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            return sub_problem_sol
        else:
            return 0

    largest_square_area = [0]
    update_area_of_max_square(0, 0)
    return largest_square_area[0]


def largest_square_area_in_matrix_top_down_approach_with_dp(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Memoized recursion: cache each cell's answer in dp_array."""

    def update_area_of_max_square_using_dp_array(row: int, col: int, dp_array: list[list[int]]) -> int:
        if row >= rows or col >= cols:
            return 0
        if dp_array[row][col] != -1:
            return dp_array[row][col]

        right = update_area_of_max_square_using_dp_array(row, col + 1, dp_array)
        diagonal = update_area_of_max_square_using_dp_array(row + 1, col + 1, dp_array)
        down = update_area_of_max_square_using_dp_array(row + 1, col, dp_array)

        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            dp_array[row][col] = sub_problem_sol
            return sub_problem_sol
        else:
            return 0

    largest_square_area = [0]
    dp_array = [[-1] * cols for _ in range(rows)]
    update_area_of_max_square_using_dp_array(0, 0, dp_array)
    return largest_square_area[0]


def largest_square_area_in_matrix_bottom_up(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Iterative DP over a (rows + 1) x (cols + 1) table."""
    dp_array = [[0] * (cols + 1) for _ in range(rows + 1)]
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = dp_array[row][col + 1]
            diagonal = dp_array[row + 1][col + 1]
            bottom = dp_array[row + 1][col]
            if mat[row][col] == 1:
                dp_array[row][col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(dp_array[row][col], largest_square_area)
            else:
                dp_array[row][col] = 0
    return largest_square_area


def largest_square_area_in_matrix_bottom_up_space_optimization(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Iterative DP keeping only the current and the next row of the table."""
    current_row = [0] * (cols + 1)
    next_row = [0] * (cols + 1)
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = current_row[col + 1]
            diagonal = next_row[col + 1]
            bottom = next_row[col]
            if mat[row][col] == 1:
                current_row[col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(current_row[col], largest_square_area)
            else:
                current_row[col] = 0
        # copy, so the next pass reads the finished row instead of aliasing it
        next_row = current_row[:]
    return largest_square_area
if __name__ == "__main__":
import doctest
doctest.testmod()
print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]]))
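    # All four variants agree on this input (a 2x2 all-ones matrix contains a
    # side-2 square); the bottom-up versions run in O(rows * cols) time and the
    # space-optimized one keeps only two rows of the DP table.
    print(largest_square_area_in_matrix_bottom_up_space_optimization(2, 2, [[1, 1], [1, 1]]))  # 2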
| 663 |
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class MaskFormerSwinModelTester:
"""simple docstring"""
def __init__( self : Any, _UpperCAmelCase : Union[str, Any], _UpperCAmelCase : Union[str, Any]=1_3, _UpperCAmelCase : Union[str, Any]=3_2, _UpperCAmelCase : Optional[Any]=2, _UpperCAmelCase : Tuple=3, _UpperCAmelCase : str=1_6, _UpperCAmelCase : Tuple=[1, 2, 1], _UpperCAmelCase : List[str]=[2, 2, 4], _UpperCAmelCase : Tuple=2, _UpperCAmelCase : str=2.0, _UpperCAmelCase : Optional[int]=True, _UpperCAmelCase : int=0.0, _UpperCAmelCase : Any=0.0, _UpperCAmelCase : Optional[int]=0.1, _UpperCAmelCase : int="gelu", _UpperCAmelCase : Any=False, _UpperCAmelCase : Any=True, _UpperCAmelCase : Tuple=0.02, _UpperCAmelCase : Any=1E-5, _UpperCAmelCase : Optional[int]=True, _UpperCAmelCase : List[Any]=None, _UpperCAmelCase : str=True, _UpperCAmelCase : Union[str, Any]=1_0, _UpperCAmelCase : List[str]=8, _UpperCAmelCase : Union[str, Any]=["stage1", "stage2", "stage3"], _UpperCAmelCase : Any=[1, 2, 3], ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = parent
SCREAMING_SNAKE_CASE__ : Tuple = batch_size
SCREAMING_SNAKE_CASE__ : List[Any] = image_size
SCREAMING_SNAKE_CASE__ : Optional[int] = patch_size
SCREAMING_SNAKE_CASE__ : List[str] = num_channels
SCREAMING_SNAKE_CASE__ : Optional[int] = embed_dim
SCREAMING_SNAKE_CASE__ : List[Any] = depths
SCREAMING_SNAKE_CASE__ : List[str] = num_heads
SCREAMING_SNAKE_CASE__ : str = window_size
SCREAMING_SNAKE_CASE__ : Any = mlp_ratio
SCREAMING_SNAKE_CASE__ : List[str] = qkv_bias
SCREAMING_SNAKE_CASE__ : List[Any] = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ : Optional[Any] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ : Tuple = drop_path_rate
SCREAMING_SNAKE_CASE__ : Dict = hidden_act
SCREAMING_SNAKE_CASE__ : List[str] = use_absolute_embeddings
SCREAMING_SNAKE_CASE__ : Tuple = patch_norm
SCREAMING_SNAKE_CASE__ : List[str] = layer_norm_eps
SCREAMING_SNAKE_CASE__ : List[Any] = initializer_range
SCREAMING_SNAKE_CASE__ : List[str] = is_training
SCREAMING_SNAKE_CASE__ : List[Any] = scope
SCREAMING_SNAKE_CASE__ : Optional[int] = use_labels
SCREAMING_SNAKE_CASE__ : Dict = type_sequence_label_size
SCREAMING_SNAKE_CASE__ : Optional[int] = encoder_stride
SCREAMING_SNAKE_CASE__ : List[Any] = out_features
SCREAMING_SNAKE_CASE__ : Dict = out_indices
def A_ ( self : List[str] ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE__ : str = None
if self.use_labels:
SCREAMING_SNAKE_CASE__ : List[str] = ids_tensor([self.batch_size], self.type_sequence_label_size )
SCREAMING_SNAKE_CASE__ : Optional[int] = self.get_config()
return config, pixel_values, labels
def A_ ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
return MaskFormerSwinConfig(
image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, embed_dim=self.embed_dim, depths=self.depths, num_heads=self.num_heads, window_size=self.window_size, mlp_ratio=self.mlp_ratio, qkv_bias=self.qkv_bias, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, drop_path_rate=self.drop_path_rate, hidden_act=self.hidden_act, use_absolute_embeddings=self.use_absolute_embeddings, path_norm=self.patch_norm, layer_norm_eps=self.layer_norm_eps, initializer_range=self.initializer_range, encoder_stride=self.encoder_stride, out_features=self.out_features, out_indices=self.out_indices, )
def A_ ( self : Dict, _UpperCAmelCase : int, _UpperCAmelCase : str, _UpperCAmelCase : Any ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = MaskFormerSwinModel(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
SCREAMING_SNAKE_CASE__ : Optional[Any] = model(_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Optional[Any] = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
SCREAMING_SNAKE_CASE__ : Union[str, Any] = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim) )
def A_ ( self : Optional[int], _UpperCAmelCase : Union[str, Any], _UpperCAmelCase : Any, _UpperCAmelCase : Tuple ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = MaskFormerSwinBackbone(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = model(_UpperCAmelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ), len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ), [1_3, 1_6, 1_6, 1_6] )
# verify channels
self.parent.assertEqual(len(model.channels ), len(config.out_features ) )
self.parent.assertListEqual(model.channels, [1_6, 3_2, 6_4] )
# verify ValueError
with self.parent.assertRaises(_UpperCAmelCase ):
SCREAMING_SNAKE_CASE__ : List[Any] = ["stem"]
SCREAMING_SNAKE_CASE__ : str = MaskFormerSwinBackbone(config=_UpperCAmelCase )
def A_ ( self : str ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ : List[str] = config_and_inputs
SCREAMING_SNAKE_CASE__ : Dict = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class lowerCamelCase (__lowerCamelCase , __lowerCamelCase , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase_ = (
(
MaskFormerSwinModel,
MaskFormerSwinBackbone,
)
if is_torch_available()
else ()
)
UpperCAmelCase_ = {"feature-extraction": MaskFormerSwinModel} if is_torch_available() else {}
UpperCAmelCase_ = False
UpperCAmelCase_ = False
UpperCAmelCase_ = False
UpperCAmelCase_ = False
UpperCAmelCase_ = False
def A_ ( self : int ) -> Dict:
"""simple docstring"""
        self.model_tester = MaskFormerSwinModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaskFormerSwinConfig, embed_dim=37)
@require_torch_multi_gpu
@unittest.skip(
reason=(
"`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn't work well with"
" `nn.DataParallel`"
) )
def A_ ( self : Any ) -> List[Any]:
"""simple docstring"""
pass
def A_ ( self : Tuple ) -> Any:
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def A_ ( self : int ) -> Optional[Any]:
"""simple docstring"""
return
def A_ ( self : Any ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase )
def A_ ( self : List[Any] ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*_UpperCAmelCase )
@unittest.skip("Swin does not use inputs_embeds" )
def A_ ( self : Any ) -> Optional[int]:
"""simple docstring"""
pass
@unittest.skip("Swin does not support feedforward chunking" )
def A_ ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
pass
def A_ ( self : Any ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ : List[Any] = model_class(_UpperCAmelCase )
self.assertIsInstance(model.get_input_embeddings(), (nn.Module) )
SCREAMING_SNAKE_CASE__ : Optional[Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_UpperCAmelCase, nn.Linear ) )
def A_ ( self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ : Dict = model_class(_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : List[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE__ : str = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE__ : Tuple = ["pixel_values"]
self.assertListEqual(arg_names[:1], _UpperCAmelCase )
@unittest.skip(reason="MaskFormerSwin is only used as backbone and doesn't support output_attentions" )
def A_ ( self : Tuple ) -> List[Any]:
"""simple docstring"""
pass
@unittest.skip(reason="MaskFormerSwin is only used as an internal backbone" )
def A_ ( self : Dict ) -> List[str]:
"""simple docstring"""
pass
def A_ ( self : List[str], _UpperCAmelCase : Optional[int], _UpperCAmelCase : Optional[Any], _UpperCAmelCase : Union[str, Any], _UpperCAmelCase : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : Dict = model(**self._prepare_for_class(_UpperCAmelCase, _UpperCAmelCase ) )
SCREAMING_SNAKE_CASE__ : List[Any] = outputs.hidden_states
SCREAMING_SNAKE_CASE__ : Optional[Any] = getattr(
self.model_tester, "expected_num_hidden_layers", len(self.model_tester.depths ) + 1 )
self.assertEqual(len(_UpperCAmelCase ), _UpperCAmelCase )
# Swin has a different seq_length
SCREAMING_SNAKE_CASE__ : Optional[Any] = (
config.patch_size
if isinstance(config.patch_size, collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
SCREAMING_SNAKE_CASE__ : Any = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ), [num_patches, self.model_tester.embed_dim], )
def A_ ( self : Optional[int] ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE__ : int = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size, collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ : str = True
self.check_hidden_states_output(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE__ : Optional[int] = True
self.check_hidden_states_output(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase )
def A_ ( self : Tuple ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ : str = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE__ : Optional[int] = 3
SCREAMING_SNAKE_CASE__ : str = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size, collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
SCREAMING_SNAKE_CASE__ : str = (
config.patch_size
if isinstance(config.patch_size, collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
SCREAMING_SNAKE_CASE__ : Dict = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
SCREAMING_SNAKE_CASE__ : List[str] = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ : List[Any] = True
self.check_hidden_states_output(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE__ : Any = True
self.check_hidden_states_output(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, (padded_height, padded_width) )
@unittest.skip(reason="MaskFormerSwin doesn't have pretrained checkpoints" )
def A_ ( self : List[Any] ) -> Dict:
"""simple docstring"""
pass
@unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin" )
def A_ ( self : Dict ) -> str:
"""simple docstring"""
pass
@unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin" )
def A_ ( self : Optional[Any] ) -> List[str]:
"""simple docstring"""
pass
def A_ ( self : int ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        def set_nan_tensor_to_zero(t):
            # NaN != NaN, so this mask selects exactly the NaN entries
            t[t != t] = 0
            return t
def check_equivalence(_UpperCAmelCase : Union[str, Any], _UpperCAmelCase : Optional[Any], _UpperCAmelCase : Tuple, _UpperCAmelCase : Optional[Any]={} ):
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : str = model(**_UpperCAmelCase, return_dict=_UpperCAmelCase, **_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : List[Any] = model(**_UpperCAmelCase, return_dict=_UpperCAmelCase, **_UpperCAmelCase ).to_tuple()
def recursive_check(_UpperCAmelCase : int, _UpperCAmelCase : Dict ):
if isinstance(_UpperCAmelCase, (List, Tuple) ):
for tuple_iterable_value, dict_iterable_value in zip(_UpperCAmelCase, _UpperCAmelCase ):
recursive_check(_UpperCAmelCase, _UpperCAmelCase )
elif isinstance(_UpperCAmelCase, _UpperCAmelCase ):
for tuple_iterable_value, dict_iterable_value in zip(
tuple_object.values(), dict_object.values() ):
recursive_check(_UpperCAmelCase, _UpperCAmelCase )
elif tuple_object is None:
return
else:
self.assertTrue(
torch.allclose(
set_nan_tensor_to_zero(_UpperCAmelCase ), set_nan_tensor_to_zero(_UpperCAmelCase ), atol=1E-5 ), msg=(
"Tuple and dict output are not equal. Difference:"
F''' {torch.max(torch.abs(tuple_object - dict_object ) )}. Tuple has `nan`:'''
F''' {torch.isnan(_UpperCAmelCase ).any()} and `inf`: {torch.isinf(_UpperCAmelCase )}. Dict has'''
F''' `nan`: {torch.isnan(_UpperCAmelCase ).any()} and `inf`: {torch.isinf(_UpperCAmelCase )}.'''
), )
recursive_check(_UpperCAmelCase, _UpperCAmelCase )
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ : List[Any] = model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
SCREAMING_SNAKE_CASE__ : Optional[Any] = self._prepare_for_class(_UpperCAmelCase, _UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self._prepare_for_class(_UpperCAmelCase, _UpperCAmelCase )
check_equivalence(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Any = self._prepare_for_class(_UpperCAmelCase, _UpperCAmelCase, return_labels=_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Optional[Any] = self._prepare_for_class(_UpperCAmelCase, _UpperCAmelCase, return_labels=_UpperCAmelCase )
check_equivalence(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : List[Any] = self._prepare_for_class(_UpperCAmelCase, _UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Optional[int] = self._prepare_for_class(_UpperCAmelCase, _UpperCAmelCase )
check_equivalence(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, {"output_hidden_states": True} )
SCREAMING_SNAKE_CASE__ : Dict = self._prepare_for_class(_UpperCAmelCase, _UpperCAmelCase, return_labels=_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Optional[int] = self._prepare_for_class(_UpperCAmelCase, _UpperCAmelCase, return_labels=_UpperCAmelCase )
check_equivalence(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, {"output_hidden_states": True} )
@require_torch
class MaskFormerSwinBackboneTest(unittest.TestCase, BackboneTesterMixin):
    """simple docstring"""

    all_model_classes = (MaskFormerSwinBackbone,) if is_torch_available() else ()
    config_class = MaskFormerSwinConfig
    def setUp(self):
        """simple docstring"""
        self.model_tester = MaskFormerSwinModelTester(self)
    def test_backbone_outputs(self):
        """simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        batch_size = inputs_dict["pixel_values"].shape[0]
        for backbone_class in self.all_model_classes:
            backbone = backbone_class(config)
            backbone.to(torch_device)
            backbone.eval()
            outputs = backbone(**inputs_dict)
            # Test default outputs and verify feature maps
            self.assertIsInstance(outputs.feature_maps, tuple)
            self.assertEqual(len(outputs.feature_maps), len(backbone.channels))
            for feature_map, n_channels in zip(outputs.feature_maps, backbone.channels):
                self.assertEqual(feature_map.shape[:2], (batch_size, n_channels))
            self.assertIsNone(outputs.hidden_states)
            self.assertIsNone(outputs.attentions)
            # Test output_hidden_states=True
            outputs = backbone(**inputs_dict, output_hidden_states=True)
            self.assertIsNotNone(outputs.hidden_states)
            self.assertEqual(len(outputs.hidden_states), len(backbone.stage_names))
            # We skip the stem layer
            for hidden_states, n_channels in zip(outputs.hidden_states[1:], backbone.channels):
                for hidden_state in hidden_states:
                    # Hidden states are in the format (batch_size, (height * width), n_channels)
                    h_batch_size, _, h_n_channels = hidden_state.shape
                    self.assertEqual((h_batch_size, h_n_channels), (batch_size, n_channels))
            # Test output_attentions=True
            if self.has_attentions:
                outputs = backbone(**inputs_dict, output_attentions=True)
                self.assertIsNotNone(outputs.attentions)
| 663 | 1 |
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_torch_available,
is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .multicontrolnet import MultiControlNetModel
from .pipeline_controlnet import StableDiffusionControlNetPipeline
    from .pipeline_controlnet_img2img import StableDiffusionControlNetImg2ImgPipeline
from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline
if is_transformers_available() and is_flax_available():
from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
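
# Illustrative downstream usage of the guarded imports above; in practice the
# pipelines are pulled in lazily through the top-level `diffusers` namespace.
# The checkpoint IDs are examples, not something this module pins:
#
#   import torch
#   from diffusers import ControlNetModel, StableDiffusionControlNetPipeline
#
#   controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny", torch_dtype=torch.float16)
#   pipe = StableDiffusionControlNetPipeline.from_pretrained(
#       "runwayml/stable-diffusion-v1-5", controlnet=controlnet, torch_dtype=torch.float16
#   )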
| 712 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''xlm-mlm-en-2048''': '''https://huggingface.co/xlm-mlm-en-2048/resolve/main/config.json''',
'''xlm-mlm-ende-1024''': '''https://huggingface.co/xlm-mlm-ende-1024/resolve/main/config.json''',
'''xlm-mlm-enfr-1024''': '''https://huggingface.co/xlm-mlm-enfr-1024/resolve/main/config.json''',
'''xlm-mlm-enro-1024''': '''https://huggingface.co/xlm-mlm-enro-1024/resolve/main/config.json''',
'''xlm-mlm-tlm-xnli15-1024''': '''https://huggingface.co/xlm-mlm-tlm-xnli15-1024/resolve/main/config.json''',
'''xlm-mlm-xnli15-1024''': '''https://huggingface.co/xlm-mlm-xnli15-1024/resolve/main/config.json''',
'''xlm-clm-enfr-1024''': '''https://huggingface.co/xlm-clm-enfr-1024/resolve/main/config.json''',
'''xlm-clm-ende-1024''': '''https://huggingface.co/xlm-clm-ende-1024/resolve/main/config.json''',
'''xlm-mlm-17-1280''': '''https://huggingface.co/xlm-mlm-17-1280/resolve/main/config.json''',
'''xlm-mlm-100-1280''': '''https://huggingface.co/xlm-mlm-100-1280/resolve/main/config.json''',
}
class XLMConfig(PretrainedConfig):
    model_type = "xlm"
    attribute_map = {
        "hidden_size": "emb_dim",
        "num_attention_heads": "n_heads",
        "num_hidden_layers": "n_layers",
        "n_words": "vocab_size",  # For backward compatibility
    }
    def __init__(self, vocab_size=30145, emb_dim=2048, n_layers=12, n_heads=16, dropout=0.1, attention_dropout=0.1, gelu_activation=True, sinusoidal_embeddings=False, causal=False, asm=False, n_langs=1, use_lang_emb=True, max_position_embeddings=512, embed_init_std=2048**-0.5, layer_norm_eps=1e-12, init_std=0.02, bos_index=0, eos_index=1, pad_index=2, unk_index=3, mask_index=5, is_encoder=True, summary_type="first", summary_use_proj=True, summary_activation=None, summary_proj_to_labels=True, summary_first_dropout=0.1, start_n_top=5, end_n_top=5, mask_token_id=0, lang_id=0, pad_token_id=2, bos_token_id=0, **kwargs):
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.emb_dim = emb_dim
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.use_lang_emb = use_lang_emb
        self.layer_norm_eps = layer_norm_eps
        self.bos_index = bos_index
        self.eos_index = eos_index
        self.pad_index = pad_index
        self.unk_index = unk_index
        self.mask_index = mask_index
        self.is_encoder = is_encoder
        self.max_position_embeddings = max_position_embeddings
        self.embed_init_std = embed_init_std
        self.init_std = init_std
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_proj_to_labels = summary_proj_to_labels
        self.summary_first_dropout = summary_first_dropout
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top
        self.mask_token_id = mask_token_id
        self.lang_id = lang_id
        if "n_words" in kwargs:
            self.n_words = kwargs["n_words"]
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, **kwargs)
class XLMOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        '''simple docstring'''
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
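
# Usage sketch for this module: build a config with the defaults above and
# inspect the ONNX input axes exposed by the export helper.
#
#   config = XLMConfig()
#   onnx_config = XLMOnnxConfig(config)
#   print(list(onnx_config.inputs.keys()))  # ['input_ids', 'attention_mask', 'token_type_ids']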
| 506 | 0 |
"""simple docstring"""
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class AlbertModelTester:
    """simple docstring"""

    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, embedding_size=16, hidden_size=36, num_hidden_layers=6, num_hidden_groups=6, num_attention_heads=6, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
'''simple docstring'''
return AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , num_hidden_groups=self.num_hidden_groups , )
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        '''simple docstring'''
        model = AlbertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
    def create_and_check_for_pretraining(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        '''simple docstring'''
        model = AlbertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels, sentence_order_label=sequence_labels, )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.sop_logits.shape, (self.batch_size, config.num_labels))
    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        '''simple docstring'''
        model = AlbertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        '''simple docstring'''
        model = AlbertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels, )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = AlbertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = AlbertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        '''simple docstring'''
        config.num_choices = self.num_choices
        model = AlbertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels, )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class AlbertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """simple docstring"""

    all_model_classes = (
        (
            AlbertModel,
            AlbertForPreTraining,
            AlbertForMaskedLM,
            AlbertForMultipleChoice,
            AlbertForSequenceClassification,
            AlbertForTokenClassification,
            AlbertForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": AlbertModel,
            "fill-mask": AlbertForMaskedLM,
            "question-answering": AlbertForQuestionAnswering,
            "text-classification": AlbertForSequenceClassification,
            "token-classification": AlbertForTokenClassification,
            "zero-shot": AlbertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        '''simple docstring'''
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device)
                inputs_dict["sentence_order_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device)
        return inputs_dict
    def setUp(self):
        '''simple docstring'''
        self.model_tester = AlbertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=AlbertConfig, hidden_size=37)
    def test_config(self):
        '''simple docstring'''
        self.config_tester.run_common_tests()

    def test_model(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_for_masked_lm(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_model_various_embeddings(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        '''simple docstring'''
        for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = AlbertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class AlbertModelIntegrationTest(unittest.TestCase):
    """simple docstring"""

    @slow
    def test_inference_no_head_absolute_embedding(self):
        '''simple docstring'''
        model = AlbertModel.from_pretrained("albert-base-v2")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
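
# For reference, the tiny configuration exercised by AlbertModelTester above can
# be built directly (values match the tester defaults):
#
#   config = AlbertConfig(vocab_size=99, embedding_size=16, hidden_size=36, num_hidden_layers=6,
#                         num_hidden_groups=6, num_attention_heads=6, intermediate_size=37)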
| 102 |
"""simple docstring"""
def binomial_coefficient(n: int, k: int) -> int:
    '''simple docstring'''
    result = 1  # to keep the calculated value
    # Since C(n, k) = C(n, n-k)
    if k > (n - k):
        k = n - k
    # Calculate C(n,k)
    for i in range(k):
        result *= n - i
        result //= i + 1
    return result


def catalan_number(node_count: int) -> int:
    '''simple docstring'''
    return binomial_coefficient(2 * node_count, node_count) // (node_count + 1)


def factorial(n: int) -> int:
    '''simple docstring'''
    if n < 0:
        raise ValueError("factorial() not defined for negative values")
    result = 1
    for i in range(1, n + 1):
        result *= i
    return result


def binary_tree_count(node_count: int) -> int:
    '''simple docstring'''
    return catalan_number(node_count) * factorial(node_count)
if __name__ == "__main__":
    node_count = int(input("Enter the number of nodes: ").strip() or 0)
if node_count <= 0:
raise ValueError("We need some nodes to work with.")
print(
f"""Given {node_count} nodes, there are {binary_tree_count(node_count)} """
f"""binary trees and {catalan_number(node_count)} binary search trees."""
)
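    # A few spot checks on the closed forms above (C(4,2) = 6; the 3-node case
    # gives catalan_number(3) = 5 BSTs and 5 * 3! = 30 binary trees).
    assert binomial_coefficient(4, 2) == 6
    assert catalan_number(3) == 5
    assert binary_tree_count(3) == 30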
| 510 | 0 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class ShapEPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = ShapEPipeline
    params = ["prompt"]
    batch_params = ["prompt"]
    required_optional_params = [
        "num_images_per_prompt",
        "num_inference_steps",
        "generator",
        "latents",
        "guidance_scale",
        "frame_size",
        "output_type",
        "return_dict",
    ]
    test_gradient_checkpointing = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def renderer_dim(self):
        return 8

    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer
    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=self.text_embedder_hidden_size, projection_dim=self.text_embedder_hidden_size, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, )
        return CLIPTextModelWithProjection(config)
    @property
    def dummy_prior(self):
        torch.manual_seed(0)
        model_kwargs = {
            "num_attention_heads": 2,
            "attention_head_dim": 16,
            "embedding_dim": self.time_input_dim,
            "num_embeddings": 32,
            "embedding_proj_dim": self.text_embedder_hidden_size,
            "time_embed_dim": self.time_embed_dim,
            "num_layers": 1,
            "clip_embed_dim": self.time_input_dim * 2,
            "additional_embeddings": 0,
            "time_embed_act_fn": "gelu",
            "norm_in_type": "layer",
            "encoder_hid_proj_type": None,
            "added_emb_type": None,
        }
        model = PriorTransformer(**model_kwargs)
        return model
    @property
    def dummy_renderer(self):
        torch.manual_seed(0)
        model_kwargs = {
            "param_shapes": (
                (self.renderer_dim, 93),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
            ),
            "d_latent": self.time_input_dim,
            "d_hidden": self.renderer_dim,
            "n_output": 12,
            "background": (
                0.1,
                0.1,
                0.1,
            ),
        }
        model = ShapERenderer(**model_kwargs)
        return model
    def get_dummy_components(self):
        prior = self.dummy_prior
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        renderer = self.dummy_renderer
        scheduler = HeunDiscreteScheduler(
            beta_schedule="exp", num_train_timesteps=1024, prediction_type="sample", use_karras_sigmas=True, clip_sample=True, clip_sample_range=1.0, )
        components = {
            "prior": prior,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "renderer": renderer,
            "scheduler": scheduler,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "generator": generator,
            "num_inference_steps": 1,
            "frame_size": 32,
            "output_type": "np",
        }
        return inputs
    def test_shap_e(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.images[0]
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (20, 32, 32, 3)
        expected_slice = np.array(
            [
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
            ]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_inference_batch_consistent(self):
        # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
        self._test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True
        self._test_inference_batch_single_identical(
            batch_size=2, test_max_difference=test_max_difference, relax_max_difference=relax_max_difference, )
    def test_num_images_per_prompt(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        batch_size = 1
        num_images_per_prompt = 2
        inputs = self.get_dummy_inputs(torch_device)
        for key in inputs.keys():
            if key in self.batch_params:
                inputs[key] = batch_size * [inputs[key]]
        images = pipe(**inputs, num_images_per_prompt=num_images_per_prompt)[0]
        assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class ShapEPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_shap_e(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/shap_e/test_shap_e_np_out.npy"
        )
        pipe = ShapEPipeline.from_pretrained("openai/shap-e")
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        generator = torch.Generator(device=torch_device).manual_seed(0)
        images = pipe(
            "a shark", generator=generator, guidance_scale=15.0, num_inference_steps=64, frame_size=64, output_type="np", ).images[0]
        assert images.shape == (20, 64, 64, 3)
        assert_mean_pixel_difference(images, expected_image)
| 704 |
'''simple docstring'''
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_vision
class ZeroShotImageClassificationPipelineTests(unittest.TestCase):
    @require_torch
    def test_small_model_pt(self):
        image_classifier = pipeline(
            model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification", )
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["a", "b", "c"])
        # The floating scores are so close, we enter floating error approximation and the order is not guaranteed across
        # python and torch versions.
        self.assertIn(
            nested_simplify(output), [
                [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}],
                [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "c"}, {"score": 0.333, "label": "b"}],
            ], )
        output = image_classifier([image] * 5, candidate_labels=["A", "B", "C"], batch_size=2)
        self.assertEqual(
            nested_simplify(output), [
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
            ]
            * 5, )
    @require_tf
    def test_small_model_tf(self):
        image_classifier = pipeline(
            model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification", framework="tf")
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["a", "b", "c"])
        self.assertEqual(
            nested_simplify(output), [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}], )
        output = image_classifier([image] * 5, candidate_labels=["A", "B", "C"], batch_size=2)
        self.assertEqual(
            nested_simplify(output), [
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
            ]
            * 5, )
    @slow
    @require_torch
    def test_large_model_pt(self):
        image_classifier = pipeline(
            task="zero-shot-image-classification", model="openai/clip-vit-base-patch32", )
        # This is an image of 2 cats with remotes and no planes
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["cat", "plane", "remote"])
        self.assertEqual(
            nested_simplify(output), [
                {"score": 0.511, "label": "remote"},
                {"score": 0.485, "label": "cat"},
                {"score": 0.004, "label": "plane"},
            ], )
        output = image_classifier([image] * 5, candidate_labels=["cat", "plane", "remote"], batch_size=2)
        self.assertEqual(
            nested_simplify(output), [
                [
                    {"score": 0.511, "label": "remote"},
                    {"score": 0.485, "label": "cat"},
                    {"score": 0.004, "label": "plane"},
                ],
            ]
            * 5, )
    @slow
    @require_tf
    def test_large_model_tf(self):
        image_classifier = pipeline(
            task="zero-shot-image-classification", model="openai/clip-vit-base-patch32", framework="tf")
        # This is an image of 2 cats with remotes and no planes
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["cat", "plane", "remote"])
        self.assertEqual(
            nested_simplify(output), [
                {"score": 0.511, "label": "remote"},
                {"score": 0.485, "label": "cat"},
                {"score": 0.004, "label": "plane"},
            ], )
        output = image_classifier([image] * 5, candidate_labels=["cat", "plane", "remote"], batch_size=2)
        self.assertEqual(
            nested_simplify(output), [
                [
                    {"score": 0.511, "label": "remote"},
                    {"score": 0.485, "label": "cat"},
                    {"score": 0.004, "label": "plane"},
                ],
            ]
            * 5, )
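
# Outside the test harness, the pipeline under test is used like this
# (checkpoint and image path are illustrative):
#
#   from transformers import pipeline
#
#   classifier = pipeline(task="zero-shot-image-classification", model="openai/clip-vit-base-patch32")
#   preds = classifier("cats.png", candidate_labels=["cat", "plane", "remote"])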
| 415 | 0 |
"""simple docstring"""
def interpolation_search(sorted_collection, item):
    '''simple docstring'''
    left = 0
    right = len(sorted_collection) - 1

    while left <= right:
        # avoid divided by 0 during interpolation
        if sorted_collection[left] == sorted_collection[right]:
            if sorted_collection[left] == item:
                return left
            else:
                return None

        point = left + ((item - sorted_collection[left]) * (right - left)) // (
            sorted_collection[right] - sorted_collection[left]
        )

        # out of range check
        if point < 0 or point >= len(sorted_collection):
            return None

        current_item = sorted_collection[point]
        if current_item == item:
            return point
        else:
            if point < left:
                right = left
                left = point
            elif point > right:
                left = right
                right = point
            else:
                if item < current_item:
                    right = point - 1
                else:
                    left = point + 1
    return None
def interpolation_search_by_recursion(sorted_collection, item, left, right):
    '''simple docstring'''
    # avoid divided by 0 during interpolation
    if sorted_collection[left] == sorted_collection[right]:
        if sorted_collection[left] == item:
            return left
        else:
            return None

    point = left + ((item - sorted_collection[left]) * (right - left)) // (
        sorted_collection[right] - sorted_collection[left]
    )

    # out of range check
    if point < 0 or point >= len(sorted_collection):
        return None

    if sorted_collection[point] == item:
        return point
    elif point < left:
        return interpolation_search_by_recursion(sorted_collection, item, point, left)
    elif point > right:
        return interpolation_search_by_recursion(sorted_collection, item, right, left)
    else:
        if sorted_collection[point] > item:
            return interpolation_search_by_recursion(
                sorted_collection, item, left, point - 1)
        else:
            return interpolation_search_by_recursion(
                sorted_collection, item, point + 1, right)
def __assert_sorted(collection):
    '''simple docstring'''
    if collection != sorted(collection):
        raise ValueError("Collection must be ascending sorted")
    return True
if __name__ == "__main__":
import sys
    debug = 0
    collection = [10, 30, 40, 45, 50, 66, 77, 93]
    if debug == 1:
        try:
            __assert_sorted(collection)
        except ValueError:
            sys.exit('Sequence must be ascending sorted to apply interpolation search')

    target = 67
    result = interpolation_search(collection, target)
if result is not None:
print(F'''{target} found at positions: {result}''')
else:
print('Not found')
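
    # The recursive variant takes explicit inclusive bounds; a quick check:
    print(interpolation_search_by_recursion(collection, 45, 0, len(collection) - 1))  # -> 3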
| 530 |
"""simple docstring"""
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
from .feature_extraction_wav2vec2 import Wav2Vec2FeatureExtractor
from .tokenization_wav2vec2 import Wav2Vec2CTCTokenizer
class Wav2Vec2Processor(ProcessorMixin):
    feature_extractor_class = "Wav2Vec2FeatureExtractor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        try:
            return super().from_pretrained(pretrained_model_name_or_path, **kwargs)
        except OSError:
            warnings.warn(
                f'''Loading a tokenizer inside {cls.__name__} from a config that does not'''
                " include a `tokenizer_class` attribute is deprecated and will be "
                "removed in v5. Please add `'tokenizer_class': 'Wav2Vec2CTCTokenizer'`"
                " attribute to either your `config.json` or `tokenizer_config.json` "
                "file to suppress this warning: ", FutureWarning, )
            feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(pretrained_model_name_or_path, **kwargs)
            tokenizer = Wav2Vec2CTCTokenizer.from_pretrained(pretrained_model_name_or_path, **kwargs)
            return cls(feature_extractor=feature_extractor, tokenizer=tokenizer)
    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        if "raw_speech" in kwargs:
            warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.")
            audio = kwargs.pop("raw_speech")
        else:
            audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs
    def pad(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor.pad(*args, **kwargs)

        input_features = kwargs.pop("input_features", None)
        labels = kwargs.pop("labels", None)
        if len(args) > 0:
            input_features = args[0]
            args = args[1:]

        if input_features is not None:
            input_features = self.feature_extractor.pad(input_features, *args, **kwargs)
        if labels is not None:
            labels = self.tokenizer.pad(labels, **kwargs)

        if labels is None:
            return input_features
        elif input_features is None:
            return labels
        else:
            input_features["labels"] = labels["input_ids"]
            return input_features
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)
    @contextmanager
    def as_target_processor(self):
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your audio inputs, or in a separate call.")
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
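
# Typical use of this processor (checkpoint name is illustrative):
#
#   import numpy as np
#   from transformers import Wav2Vec2Processor
#
#   processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-base-960h")
#   speech = np.zeros(16_000, dtype=np.float32)  # one second of 16 kHz audio
#   inputs = processor(audio=speech, sampling_rate=16_000, return_tensors="pt")
#   labels = processor(text="HELLO WORLD").input_ids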
| 530 | 1 |
class Graph:
    def __init__(self):
        self.num_vertices = 0
        self.num_edges = 0
        self.adjacency = {}

    def add_vertex(self, vertex):
        if vertex not in self.adjacency:
            self.adjacency[vertex] = {}
            self.num_vertices += 1

    def add_edge(self, head, tail, weight):
        self.add_vertex(head)
        self.add_vertex(tail)
        if head == tail:
            return
        self.adjacency[head][tail] = weight
        self.adjacency[tail][head] = weight

    def distinct_weight(self):
        edges = self.get_edges()
        for edge in edges:
            head, tail, weight = edge
            edges.remove((tail, head, weight))
        for i in range(len(edges)):
            edges[i] = list(edges[i])
        edges.sort(key=lambda e: e[2])
        for i in range(len(edges) - 1):
            if edges[i][2] >= edges[i + 1][2]:
                edges[i + 1][2] = edges[i][2] + 1
        for edge in edges:
            head, tail, weight = edge
            self.adjacency[head][tail] = weight
            self.adjacency[tail][head] = weight

    def __str__(self):
        string = ""
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                weight = self.adjacency[head][tail]
                string += f'''{head} -> {tail} == {weight}\n'''
        return string.rstrip('\n')

    def get_edges(self):
        output = []
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                output.append((tail, head, self.adjacency[head][tail]))
        return output

    def get_vertices(self):
        return self.adjacency.keys()

    @staticmethod
    def build(vertices=None, edges=None):
        g = Graph()
        if vertices is None:
            vertices = []
        if edges is None:
            edges = []
        for vertex in vertices:
            g.add_vertex(vertex)
        for edge in edges:
            g.add_edge(*edge)
        return g
class UnionFind:
    def __init__(self):
        self.parent = {}
        self.rank = {}

    def __len__(self):
        return len(self.parent)

    def make_set(self, item):
        if item in self.parent:
            return self.find(item)
        self.parent[item] = item
        self.rank[item] = 0
        return item

    def find(self, item):
        if item not in self.parent:
            return self.make_set(item)
        if item != self.parent[item]:
            self.parent[item] = self.find(self.parent[item])
        return self.parent[item]

    def union(self, item1, item2):
        root1 = self.find(item1)
        root2 = self.find(item2)
        if root1 == root2:
            return root1
        if self.rank[root1] > self.rank[root2]:
            self.parent[root2] = root1
            return root1
        if self.rank[root1] < self.rank[root2]:
            self.parent[root1] = root2
            return root2
        if self.rank[root1] == self.rank[root2]:
            self.rank[root1] += 1
            self.parent[root2] = root1
            return root1
        return None
def boruvka_mst(graph):
    num_components = graph.num_vertices
    union_find = UnionFind()
    mst_edges = []
    while num_components > 1:
        cheap_edge = {}
        for vertex in graph.get_vertices():
            cheap_edge[vertex] = -1
        edges = graph.get_edges()
        for edge in edges:
            head, tail, weight = edge
            edges.remove((tail, head, weight))
        for edge in edges:
            head, tail, weight = edge
            set1 = union_find.find(head)
            set2 = union_find.find(tail)
            if set1 != set2:
                if cheap_edge[set1] == -1 or cheap_edge[set1][2] > weight:
                    cheap_edge[set1] = [head, tail, weight]
                if cheap_edge[set2] == -1 or cheap_edge[set2][2] > weight:
                    cheap_edge[set2] = [head, tail, weight]
        for vertex in cheap_edge:
            if cheap_edge[vertex] != -1:
                head, tail, weight = cheap_edge[vertex]
                if union_find.find(head) != union_find.find(tail):
                    union_find.union(head, tail)
                    mst_edges.append(cheap_edge[vertex])
                    num_components = num_components - 1
    mst = Graph.build(edges=mst_edges)
    return mst
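

if __name__ == "__main__":
    # Small demo of the API above: build a graph, force weights to be distinct,
    # then extract the minimum spanning tree with Boruvka's algorithm.
    g = Graph.build(vertices=[1, 2, 3, 4], edges=[(1, 2, 1), (2, 3, 2), (3, 1, 3), (3, 4, 1)])
    g.distinct_weight()
    print(boruvka_mst(g))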
| 704 |
from argparse import ArgumentParser
from .env import EnvironmentCommand
def main():
    """simple docstring"""
    parser = ArgumentParser('Diffusers CLI tool', usage='diffusers-cli <command> [<args>]')
    commands_parser = parser.add_subparsers(help='diffusers-cli command helpers')

    # Register commands
    EnvironmentCommand.register_subcommand(commands_parser)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, 'func'):
        parser.print_help()
        exit(1)

    # Run
    service = args.func(args)
    service.run()
if __name__ == "__main__":
main()
| 372 | 0 |
'''simple docstring'''
import re
from typing import Callable, List, Optional, Union
import tensorflow as tf
try:
from tensorflow.keras.optimizers.legacy import Adam
except ImportError:
from tensorflow.keras.optimizers import Adam
class WarmUp(tf.keras.optimizers.schedules.LearningRateSchedule):
    """simple docstring"""

    def __init__(self, initial_learning_rate: float, decay_schedule_fn: Callable, warmup_steps: int, power: float = 1.0, name: str = None):
        super().__init__()
        self.initial_learning_rate = initial_learning_rate
        self.warmup_steps = warmup_steps
        self.power = power
        self.decay_schedule_fn = decay_schedule_fn
        self.name = name

    def __call__(self, step):
        with tf.name_scope(self.name or 'WarmUp') as name:
            # Implements polynomial warmup. i.e., if global_step < warmup_steps, the
            # learning rate will be `global_step/num_warmup_steps * init_lr`.
            global_step_float = tf.cast(step, tf.float32)
            warmup_steps_float = tf.cast(self.warmup_steps, tf.float32)
            warmup_percent_done = global_step_float / warmup_steps_float
            warmup_learning_rate = self.initial_learning_rate * tf.math.pow(warmup_percent_done, self.power)
            return tf.cond(
                global_step_float < warmup_steps_float, lambda: warmup_learning_rate, lambda: self.decay_schedule_fn(step - self.warmup_steps), name=name, )

    def get_config(self):
        return {
            "initial_learning_rate": self.initial_learning_rate,
            "decay_schedule_fn": self.decay_schedule_fn,
            "warmup_steps": self.warmup_steps,
            "power": self.power,
            "name": self.name,
        }
def create_optimizer(init_lr: float, num_train_steps: int, num_warmup_steps: int, min_lr_ratio: float = 0.0, adam_beta1: float = 0.9, adam_beta2: float = 0.999, adam_epsilon: float = 1e-8, adam_clipnorm: Optional[float] = None, adam_global_clipnorm: Optional[float] = None, weight_decay_rate: float = 0.0, power: float = 1.0, include_in_weight_decay: Optional[List[str]] = None):
    lr_schedule = tf.keras.optimizers.schedules.PolynomialDecay(
        initial_learning_rate=init_lr, decay_steps=num_train_steps - num_warmup_steps, end_learning_rate=init_lr * min_lr_ratio, power=power, )
    if num_warmup_steps:
        lr_schedule = WarmUp(
            initial_learning_rate=init_lr, decay_schedule_fn=lr_schedule, warmup_steps=num_warmup_steps, )
    if weight_decay_rate > 0.0:
        optimizer = AdamWeightDecay(
            learning_rate=lr_schedule, weight_decay_rate=weight_decay_rate, beta_1=adam_beta1, beta_2=adam_beta2, epsilon=adam_epsilon, clipnorm=adam_clipnorm, global_clipnorm=adam_global_clipnorm, exclude_from_weight_decay=['LayerNorm', 'layer_norm', 'bias'], include_in_weight_decay=include_in_weight_decay, )
    else:
        optimizer = tf.keras.optimizers.Adam(
            learning_rate=lr_schedule, beta_1=adam_beta1, beta_2=adam_beta2, epsilon=adam_epsilon, clipnorm=adam_clipnorm, global_clipnorm=adam_global_clipnorm, )
    # We return the optimizer and the LR scheduler in order to better track the
    # evolution of the LR independently of the optimizer.
    return optimizer, lr_schedule
class AdamWeightDecay(Adam):
    """simple docstring"""

    def __init__(self, learning_rate=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-7, amsgrad=False, weight_decay_rate=0.0, include_in_weight_decay=None, exclude_from_weight_decay=None, name="AdamWeightDecay", **kwargs):
        super().__init__(learning_rate, beta_1, beta_2, epsilon, amsgrad, name, **kwargs)
        self.weight_decay_rate = weight_decay_rate
        self._include_in_weight_decay = include_in_weight_decay
        self._exclude_from_weight_decay = exclude_from_weight_decay

    @classmethod
    def from_config(cls, config):
        custom_objects = {'WarmUp': WarmUp}
        return super(AdamWeightDecay, cls).from_config(config, custom_objects=custom_objects)

    def _prepare_local(self, var_device, var_dtype, apply_state):
        super(AdamWeightDecay, self)._prepare_local(var_device, var_dtype, apply_state)
        apply_state[(var_device, var_dtype)]["weight_decay_rate"] = tf.constant(
            self.weight_decay_rate, name='adam_weight_decay_rate')

    def _decay_weights_op(self, var, learning_rate, apply_state):
        do_decay = self._do_use_weight_decay(var.name)
        if do_decay:
            return var.assign_sub(
                learning_rate * var * apply_state[(var.device, var.dtype.base_dtype)]['weight_decay_rate'], use_locking=self._use_locking, )
        return tf.no_op()

    def apply_gradients(self, grads_and_vars, name=None, **kwargs):
        grads, tvars = list(zip(*grads_and_vars))
        return super(AdamWeightDecay, self).apply_gradients(zip(grads, tvars), name=name, **kwargs)

    def _get_lr(self, var_device, var_dtype, apply_state):
        if apply_state is None:
            return self._decayed_lr_t[var_dtype], {}
        apply_state = apply_state or {}
        coefficients = apply_state.get((var_device, var_dtype))
        if coefficients is None:
            coefficients = self._fallback_apply_state(var_device, var_dtype)
            apply_state[(var_device, var_dtype)] = coefficients
        return coefficients["lr_t"], {"apply_state": apply_state}

    def _resource_apply_dense(self, grad, var, apply_state=None):
        lr_t, kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state)
        decay = self._decay_weights_op(var, lr_t, apply_state)
        with tf.control_dependencies([decay]):
            return super(AdamWeightDecay, self)._resource_apply_dense(grad, var, **kwargs)

    def _resource_apply_sparse(self, grad, var, indices, apply_state=None):
        lr_t, kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state)
        decay = self._decay_weights_op(var, lr_t, apply_state)
        with tf.control_dependencies([decay]):
            return super(AdamWeightDecay, self)._resource_apply_sparse(grad, var, indices, **kwargs)

    def get_config(self):
        config = super().get_config()
        config.update({'weight_decay_rate': self.weight_decay_rate})
        return config

    def _do_use_weight_decay(self, param_name):
        if self.weight_decay_rate == 0:
            return False
        if self._include_in_weight_decay:
            for r in self._include_in_weight_decay:
                if re.search(r, param_name) is not None:
                    return True
        if self._exclude_from_weight_decay:
            for r in self._exclude_from_weight_decay:
                if re.search(r, param_name) is not None:
                    return False
        return True
class GradientAccumulator(object):
    """simple docstring"""

    def __init__(self):
        self._gradients = []
        self._accum_steps = None

    @property
    def step(self):
        if self._accum_steps is None:
            self._accum_steps = tf.Variable(
                tf.constant(0, dtype=tf.int64), trainable=False, synchronization=tf.VariableSynchronization.ON_READ, aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA, )
        return self._accum_steps.value()

    @property
    def gradients(self):
        if not self._gradients:
            raise ValueError('The accumulator should be called first to initialize the gradients')
        return [gradient.value() if gradient is not None else gradient for gradient in self._gradients]

    def __call__(self, gradients):
        if not self._gradients:
            _ = self.step  # Create the step variable.
            self._gradients.extend(
                [
                    tf.Variable(
                        tf.zeros_like(gradient), trainable=False, synchronization=tf.VariableSynchronization.ON_READ, aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA, )
                    if gradient is not None
                    else gradient
                    for gradient in gradients
                ])
        if len(gradients) != len(self._gradients):
            raise ValueError(f'Expected {len(self._gradients)} gradients, but got {len(gradients)}')
        for accum_gradient, gradient in zip(self._gradients, gradients):
            if accum_gradient is not None and gradient is not None:
                accum_gradient.assign_add(gradient)
        self._accum_steps.assign_add(1)

    def reset(self):
        if not self._gradients:
            return
        self._accum_steps.assign(0)
        for gradient in self._gradients:
            if gradient is not None:
                gradient.assign(tf.zeros_like(gradient))
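
# Sketch of how the pieces above compose in a training loop (step counts and
# hyperparameters are illustrative):
#
#   optimizer, lr_schedule = create_optimizer(
#       init_lr=5e-5, num_train_steps=1_000, num_warmup_steps=100, weight_decay_rate=0.01
#   )
#   accumulator = GradientAccumulator()
#   # per micro-batch: accumulator(grads)
#   # per optimizer step:
#   #   optimizer.apply_gradients(zip(accumulator.gradients, model.trainable_variables))
#   #   accumulator.reset()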
| 13 |
import argparse
import json
import os
from tensorflow.core.protobuf.saved_model_pb2 import SavedModel
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
__UpperCamelCase : Tuple = "."
# Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model)
INTERNAL_OPS = [
"Assert",
"AssignVariableOp",
"EmptyTensorList",
"MergeV2Checkpoints",
"ReadVariableOp",
"ResourceGather",
"RestoreV2",
"SaveV2",
"ShardedFilename",
"StatefulPartitionedCall",
"StaticRegexFullMatch",
"VarHandleOp",
]
def onnx_compliancy(saved_model_path, strict, opset):
    saved_model = SavedModel()
    onnx_ops = []

    with open(os.path.join(REPO_PATH, "utils", "tf_ops", "onnx.json")) as f:
        onnx_opsets = json.load(f)["opsets"]

    for i in range(1, opset + 1):
        onnx_ops.extend(onnx_opsets[str(i)])

    with open(saved_model_path, "rb") as f:
        saved_model.ParseFromString(f.read())

    model_op_names = set()

    # Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs)
    for meta_graph in saved_model.meta_graphs:
        # Add operations in the graph definition
        model_op_names.update(node.op for node in meta_graph.graph_def.node)

        # Go through the functions in the graph definition
        for func in meta_graph.graph_def.library.function:
            # Add operations in each function
            model_op_names.update(node.op for node in func.node_def)

    # Convert to list, sorted if you want
    model_op_names = sorted(model_op_names)
    incompatible_ops = []

    for op in model_op_names:
        if op not in onnx_ops and op not in INTERNAL_OPS:
            incompatible_ops.append(op)

    if strict and len(incompatible_ops) > 0:
        raise Exception(f'Found the following incompatible ops for the opset {opset}:\n' + "\n".join(incompatible_ops))
    elif len(incompatible_ops) > 0:
        print(f'Found the following incompatible ops for the opset {opset}:')
        print(*incompatible_ops, sep="\n")
    else:
        print(f'The saved model {saved_model_path} can properly be converted with ONNX.')
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--saved_model_path", help="Path of the saved model to check (the .pb file).")
parser.add_argument(
"--opset", default=12, type=int, help="The ONNX opset against which the model has to be tested."
)
parser.add_argument(
"--framework", choices=["onnx"], default="onnx", help="Frameworks against which to test the saved model."
)
parser.add_argument(
"--strict", action="store_true", help="Whether make the checking strict (raise errors) or not (raise warnings)"
)
    args = parser.parse_args()
if args.framework == "onnx":
onnx_compliancy(args.saved_model_path, args.strict, args.opset)
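
# Programmatic use, equivalent to the CLI entry point above (the path is a
# placeholder, not a real checkpoint):
#
#   onnx_compliancy("path/to/saved_model.pb", strict=False, opset=12)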
| 468 | 0 |
'''simple docstring'''
import re
from ..utils import cached_file
# docstyle-ignore
CHAT_MESSAGE_PROMPT = """
Human: <<task>>

Assistant: """

DEFAULT_PROMPTS_REPO = "huggingface-tools/default-prompts"
PROMPT_FILES = {"chat": "chat_prompt_template.txt", "run": "run_prompt_template.txt"}
def download_prompt(prompt_or_repo_id, agent_name, mode="run"):
    '''simple docstring'''
    if prompt_or_repo_id is None:
        prompt_or_repo_id = DEFAULT_PROMPTS_REPO

    # prompt is considered a repo ID when it does not contain any kind of space
    if re.search("\\s", prompt_or_repo_id) is not None:
        return prompt_or_repo_id

    prompt_file = cached_file(
        prompt_or_repo_id, PROMPT_FILES[mode], repo_type="dataset", user_agent={"agent": agent_name})
    with open(prompt_file, "r", encoding="utf-8") as f:
        return f.read()
| 712 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''microsoft/beit-base-patch16-224-pt22k''': (
'''https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json'''
),
# See all BEiT models at https://huggingface.co/models?filter=beit
}
class BeitConfig(PretrainedConfig):
    model_type = 'beit'

    def __init__(self, vocab_size=8192, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-12, image_size=224, patch_size=16, num_channels=3, use_mask_token=False, use_absolute_position_embeddings=False, use_relative_position_bias=False, use_shared_relative_position_bias=False, layer_scale_init_value=0.1, drop_path_rate=0.1, use_mean_pooling=True, out_indices=[3, 5, 7, 11], pool_scales=[1, 2, 3, 6], use_auxiliary_head=True, auxiliary_loss_weight=0.4, auxiliary_channels=256, auxiliary_num_convs=1, auxiliary_concat_input=False, semantic_loss_ignore_index=255, **kwargs):
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class _lowerCAmelCase ( __UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : Optional[Any] = version.parse('1.11' )
@property
def _a (self ):
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def _a (self ):
return 1E-4 | 686 | 0 |
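# Usage sketch (an addition for illustration; not in the original file):
# instantiating the configuration and inspecting the ONNX export settings.
#
#   config = BeitConfig()                  # defaults: hidden_size=768, 12 layers
#   onnx_config = BeitOnnxConfig(config)
#   print(dict(onnx_config.inputs))        # {"pixel_values": {0: "batch", ...}}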
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

REALM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"google/realm-cc-news-pretrained-embedder": (
"https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json"
),
"google/realm-cc-news-pretrained-encoder": (
"https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json"
),
"google/realm-cc-news-pretrained-scorer": (
"https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json"
),
"google/realm-cc-news-pretrained-openqa": (
"https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/config.json"
),
"google/realm-orqa-nq-openqa": "https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json",
"google/realm-orqa-nq-reader": "https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json",
"google/realm-orqa-wq-openqa": "https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json",
"google/realm-orqa-wq-reader": "https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json",
# See all REALM models at https://huggingface.co/models?filter=realm
}
class RealmConfig(PretrainedConfig):
    """Configuration class shared by all REALM checkpoints."""

    model_type = "realm"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        retriever_proj_size=128,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_candidates=8,
        intermediate_size=3072,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        span_hidden_size=256,
        max_span_width=10,
        reader_layer_norm_eps=1e-3,
        reader_beam_size=5,
        reader_seq_len=320,
        num_block_records=13353718,
        searcher_beam_size=5000,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        # Common config
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.retriever_proj_size = retriever_proj_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_candidates = num_candidates
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps

        # Reader config
        self.span_hidden_size = span_hidden_size
        self.max_span_width = max_span_width
        self.reader_layer_norm_eps = reader_layer_norm_eps
        self.reader_beam_size = reader_beam_size
        self.reader_seq_len = reader_seq_len

        # Retrieval config
        self.num_block_records = num_block_records
        self.searcher_beam_size = searcher_beam_size
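

# Usage sketch (added for illustration): all checkpoint families in the
# archive map above share this one configuration class, e.g.
#
#   config = RealmConfig(num_candidates=4, searcher_beam_size=1000)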
| 208 |
'''simple docstring'''
import json
import os
from datetime import date
from pathlib import Path
from tabulate import DataRow, TableFormat, tabulate
hf_table_format = TableFormat(
    lineabove=None,
    linebelowheader=None,
    linebetweenrows=None,
    linebelow=None,
    headerrow=DataRow("", "|", "|"),
    datarow=DataRow("", "|", "|"),
    padding=1,
    with_header_hide=None,
)

failed = []
group_info = []
no_error_payload = {"type": "section", "text": {"type": "plain_text", "text": "No failed tests! 🤗", "emoji": True}}
payload = [
    {
        "type": "header",
        "text": {
            "type": "plain_text",
            "text": f"🤗 Accelerate nightly {os.environ.get('TEST_TYPE', '')} test results",
            "emoji": True,
        },
    }
]
total_num_failed = 0
for log in Path().glob("*.log"):
    section_num_failed = 0
    with open(log, "r") as f:
        for line in f:
            data = json.loads(line)
            if data.get("nodeid", "") != "":
                test = data["nodeid"]
                if data.get("duration", None) is not None:
                    duration = f"{data['duration']:.4f}"
                    if data.get("outcome", "") == "failed":
                        section_num_failed += 1
                        failed.append([test, duration, log.name.split("_")[0]])
                        total_num_failed += 1
    group_info.append([str(log), section_num_failed, failed])
    failed = []
    log.unlink()

message = ""
all_filesafailed = []
if total_num_failed > 0:
for name, num_failed, failed_tests in group_info:
if num_failed > 0:
if num_failed == 1:
message += f"*{name[1:]}: {num_failed} failed test*\n"
else:
message += f"*{name[1:]}: {num_failed} failed tests*\n"
            failed_table = []
            filesafailed = {}
            for test in failed_tests:
                data = test[0].split("::")
                data[0] = data[0].split("/")[-1]
                if data[0] not in filesafailed:
                    filesafailed[data[0]] = [data[1:]]
                else:
                    filesafailed[data[0]] += [data[1:]]
                failed_table.append(data)

            files = [test[0] for test in failed_table]
            individual_files = list(set(files))
            # Count number of instances in failed_tests
            table = []
            for file in individual_files:
                table.append([file, len(filesafailed[file])])
            failed_table = tabulate(
                table,
                headers=["Test Location", "Num Failed"],
                tablefmt=hf_table_format,
                stralign="right",
            )
            message += f"\n```\n{failed_table}\n```"
            all_filesafailed.append(filesafailed)
    if len(message) > 3000:
        err = "Too many failed tests, please see the full report in the Action results."
        offset = len(err) + 10
        message = message[: 3000 - offset] + f"\n...\n```\n{err}"
print(f"### {message}")
else:
UpperCamelCase ="No failed tests! 🤗"
print(f"## {message}")
payload.append(no_error_payload)
if os.environ.get("TEST_TYPE", "") != "":
from slack_sdk import WebClient
    client = WebClient(token=os.environ["SLACK_API_TOKEN"])
    if message != "No failed tests! 🤗":
        md_report = {
            "type": "section",
            "text": {
                "type": "mrkdwn",
                "text": message,
            },
        }
        payload.append(md_report)
        action_button = {
            "type": "section",
            "text": {
                "type": "mrkdwn",
                "text": "*For more details:*",
            },
            "accessory": {
                "type": "button",
                "text": {
                    "type": "plain_text",
                    "text": "Check Action results",
                    "emoji": True,
                },
                "url": f"https://github.com/{os.environ['GITHUB_REPOSITORY']}/actions/runs/{os.environ['GITHUB_RUN_ID']}",
            },
        }
        payload.append(action_button)
        date_report = {
            "type": "context",
            "elements": [
                {
                    "type": "plain_text",
                    "text": f"Nightly {os.environ.get('TEST_TYPE')} test results for {date.today()}",
                }
            ],
        }
        payload.append(date_report)
        response = client.chat_postMessage(channel="#accelerate-ci-daily", text=message, blocks=payload)
        ts = response.data["ts"]
        for failed_file in all_filesafailed:
            for test_location, test_failures in failed_file.items():
                # Keep only the first instance of the test name
                test_class = ""
                for i, row in enumerate(test_failures):
                    if row[0] != test_class:
                        test_class = row[0]
                    else:
                        row[0] = ""

                payload = {
"type": "section",
"text": {
"type": "mrkdwn",
"text": f"Test location: {test_location}\n```\n{tabulate(test_failures, headers=['Class', 'Test'], tablefmt=hf_table_format, stralign='right')}\n```",
},
}
client.chat_postMessage(
channel="#accelerate-ci-daily",
thread_ts=ts,
blocks=[payload],
)
| 208 | 1 |
'''simple docstring'''
from torch import nn
class ClassificationHead(nn.Module):
    """A simple linear head that maps transformer hidden states to class logits."""

    def __init__(self, class_size, embed_size):
        super().__init__()
        self.class_size = class_size
        self.embed_size = embed_size
        # self.mlp1 = nn.Linear(embed_size, embed_size)
        # self.mlp2 = (nn.Linear(embed_size, class_size))
        self.mlp = nn.Linear(embed_size, class_size)

    def forward(self, hidden_state):
        # hidden_state = nn.functional.relu(self.mlp1(hidden_state))
        # hidden_state = self.mlp2(hidden_state)
        logits = self.mlp(hidden_state)
        return logits
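# Usage sketch (added; shapes are illustrative assumptions):
#
#   import torch
#   head = ClassificationHead(class_size=5, embed_size=768)
#   hidden = torch.randn(2, 768)       # (batch, embed_size)
#   logits = head(hidden)              # -> (batch, class_size) == (2, 5)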
| 521 |
'''simple docstring'''
import os
from datetime import datetime as dt
from github import Github
LABELS_TO_EXEMPT = [
'good first issue',
'feature request',
'wip',
]
def main() -> None:
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/accelerate")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        comments = sorted([comment for comment in issue.get_comments()], key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
        current_time = dt.utcnow()
        days_since_updated = (current_time - issue.updated_at).days
        days_since_creation = (current_time - issue.created_at).days
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and days_since_updated > 7
and days_since_creation >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Close issue since it has been 7 days of inactivity since bot mention.
issue.edit(state="closed" )
elif (
days_since_updated > 23
and days_since_creation >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Add stale comment
issue.create_comment(
"This issue has been automatically marked as stale because it has not had "
"recent activity. If you think this still needs to be addressed "
"please comment on this thread.\n\nPlease note that issues that do not follow the "
"[contributing guidelines](https://github.com/huggingface/accelerate/blob/main/CONTRIBUTING.md) "
"are likely to be ignored." )
if __name__ == "__main__":
main()
| 521 | 1 |
from math import cos, sin, sqrt, tau
from audio_filters.iir_filter import IIRFilter
def make_lowpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a low-pass filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 - _cos) / 2
    b1 = 1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt


def make_highpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a high-pass filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 + _cos) / 2
    b1 = -1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt


def make_bandpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a band-pass filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = _sin / 2
    b1 = 0
    b2 = -b0

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_allpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates an all-pass filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = 1 - alpha
    b1 = -2 * _cos
    b2 = 1 + alpha

    filt = IIRFilter(2)
    filt.set_coefficients([b2, b1, b0], [b0, b1, b2])
    return filt


def make_peak(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a peak filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)

    b0 = 1 + alpha * big_a
    b1 = -2 * _cos
    b2 = 1 - alpha * big_a

    a0 = 1 + alpha / big_a
    a1 = -2 * _cos
    a2 = 1 - alpha / big_a

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_lowshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a low-shelf filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a) * alpha

    b0 = big_a * (pmc + aa2)
    b1 = 2 * big_a * mpc
    b2 = big_a * (pmc - aa2)

    a0 = ppmc + aa2
    a1 = -2 * pmpc
    a2 = ppmc - aa2

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_highshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a high-shelf filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a) * alpha

    b0 = big_a * (ppmc + aa2)
    b1 = -2 * big_a * pmpc
    b2 = big_a * (ppmc - aa2)

    a0 = pmc + aa2
    a1 = 2 * mpc
    a2 = pmc - aa2

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
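

if __name__ == "__main__":
    # Usage sketch (added; not in the original module). This assumes the
    # companion IIRFilter class exposes a per-sample process() method, so a
    # signal is filtered one sample at a time.
    lowpass = make_lowpass(frequency=1000, samplerate=48000)
    samples = [0.0, 1.0, 0.5, -0.5, -1.0]
    filtered = [lowpass.process(sample) for sample in samples]
    print(filtered)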
| 81 |
edges = {"a": ["c", "b"], "b": ["d", "e"], "c": [], "d": [], "e": []}
vertices = ["a", "b", "c", "d", "e"]


def topological_sort(start, visited, sort):
    """Performs a depth-first topological sort on the graph defined by `edges`."""
    current = start
    # add current to visited
    visited.append(current)
    neighbors = edges[current]
    for neighbor in neighbors:
        # if neighbor not in visited, visit
        if neighbor not in visited:
            sort = topological_sort(neighbor, visited, sort)
    # if all neighbors visited add current to sort
    sort.append(current)
    # if all vertices haven't been visited select a new one to visit
    if len(visited) != len(vertices):
        for vertice in vertices:
            if vertice not in visited:
                sort = topological_sort(vertice, visited, sort)
    # return sort
    return sort


if __name__ == "__main__":
    sort = topological_sort("a", [], [])
    print(sort)
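    # Expected output (added note): starting at "a", the DFS appends each vertex
    # after its children, so this prints the reverse topological order
    # ['c', 'd', 'e', 'b', 'a'].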
| 81 | 1 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
snake_case_ : List[str] =logging.get_logger(__name__)
def get_dpt_config(checkpoint_url):
    config = DPTConfig(embedding_type="hybrid")

    if "large" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.backbone_out_indices = [5, 11, 17, 23]
        config.neck_hidden_sizes = [256, 512, 1024, 1024]
        expected_shape = (1, 384, 384)

    if "nyu" in checkpoint_url or "midas" in checkpoint_url:
        config.hidden_size = 768
        config.reassemble_factors = [1, 1, 1, 0.5]
        config.neck_hidden_sizes = [256, 512, 768, 768]
        config.num_labels = 150
        config.patch_size = 16
        expected_shape = (1, 384, 384)
        config.use_batch_norm_in_fusion_residual = False
        config.readout_type = "project"

    if "ade" in checkpoint_url:
        config.use_batch_norm_in_fusion_residual = True
        config.hidden_size = 768
        config.reassemble_factors = [1, 1, 1, 0.5]
        config.num_labels = 150
        config.patch_size = 16
        repo_id = "huggingface/label-files"
        filename = "ade20k-id2label.json"
        id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        expected_shape = [1, 150, 480, 480]

    return config, expected_shape
def remove_ignore_keys_(state_dict):
    ignore_keys = ["pretrained.model.head.weight", "pretrained.model.head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(name):
'''simple docstring'''
if (
"pretrained.model" in name
and "cls_token" not in name
and "pos_embed" not in name
and "patch_embed" not in name
):
__A = name.replace("pretrained.model" , "dpt.encoder" )
if "pretrained.model" in name:
__A = name.replace("pretrained.model" , "dpt.embeddings" )
if "patch_embed" in name:
__A = name.replace("patch_embed" , "" )
if "pos_embed" in name:
__A = name.replace("pos_embed" , "position_embeddings" )
if "attn.proj" in name:
__A = name.replace("attn.proj" , "attention.output.dense" )
if "proj" in name and "project" not in name:
__A = name.replace("proj" , "projection" )
if "blocks" in name:
__A = name.replace("blocks" , "layer" )
if "mlp.fc1" in name:
__A = name.replace("mlp.fc1" , "intermediate.dense" )
if "mlp.fc2" in name:
__A = name.replace("mlp.fc2" , "output.dense" )
if "norm1" in name and "backbone" not in name:
__A = name.replace("norm1" , "layernorm_before" )
if "norm2" in name and "backbone" not in name:
__A = name.replace("norm2" , "layernorm_after" )
if "scratch.output_conv" in name:
__A = name.replace("scratch.output_conv" , "head" )
if "scratch" in name:
__A = name.replace("scratch" , "neck" )
if "layer1_rn" in name:
__A = name.replace("layer1_rn" , "convs.0" )
if "layer2_rn" in name:
__A = name.replace("layer2_rn" , "convs.1" )
if "layer3_rn" in name:
__A = name.replace("layer3_rn" , "convs.2" )
if "layer4_rn" in name:
__A = name.replace("layer4_rn" , "convs.3" )
if "refinenet" in name:
__A = int(name[len("neck.refinenet" ) : len("neck.refinenet" ) + 1] )
# tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
__A = name.replace(F"""refinenet{layer_idx}""" , F"""fusion_stage.layers.{abs(layer_idx-4 )}""" )
if "out_conv" in name:
__A = name.replace("out_conv" , "projection" )
if "resConfUnit1" in name:
__A = name.replace("resConfUnit1" , "residual_layer1" )
if "resConfUnit2" in name:
__A = name.replace("resConfUnit2" , "residual_layer2" )
if "conv1" in name:
__A = name.replace("conv1" , "convolution1" )
if "conv2" in name:
__A = name.replace("conv2" , "convolution2" )
# readout blocks
if "pretrained.act_postprocess1.0.project.0" in name:
__A = name.replace("pretrained.act_postprocess1.0.project.0" , "neck.reassemble_stage.readout_projects.0.0" )
if "pretrained.act_postprocess2.0.project.0" in name:
__A = name.replace("pretrained.act_postprocess2.0.project.0" , "neck.reassemble_stage.readout_projects.1.0" )
if "pretrained.act_postprocess3.0.project.0" in name:
__A = name.replace("pretrained.act_postprocess3.0.project.0" , "neck.reassemble_stage.readout_projects.2.0" )
if "pretrained.act_postprocess4.0.project.0" in name:
__A = name.replace("pretrained.act_postprocess4.0.project.0" , "neck.reassemble_stage.readout_projects.3.0" )
# resize blocks
if "pretrained.act_postprocess1.3" in name:
__A = name.replace("pretrained.act_postprocess1.3" , "neck.reassemble_stage.layers.0.projection" )
if "pretrained.act_postprocess1.4" in name:
__A = name.replace("pretrained.act_postprocess1.4" , "neck.reassemble_stage.layers.0.resize" )
if "pretrained.act_postprocess2.3" in name:
__A = name.replace("pretrained.act_postprocess2.3" , "neck.reassemble_stage.layers.1.projection" )
if "pretrained.act_postprocess2.4" in name:
__A = name.replace("pretrained.act_postprocess2.4" , "neck.reassemble_stage.layers.1.resize" )
if "pretrained.act_postprocess3.3" in name:
__A = name.replace("pretrained.act_postprocess3.3" , "neck.reassemble_stage.layers.2.projection" )
if "pretrained.act_postprocess4.3" in name:
__A = name.replace("pretrained.act_postprocess4.3" , "neck.reassemble_stage.layers.3.projection" )
if "pretrained.act_postprocess4.4" in name:
__A = name.replace("pretrained.act_postprocess4.4" , "neck.reassemble_stage.layers.3.resize" )
if "pretrained" in name:
__A = name.replace("pretrained" , "dpt" )
if "bn" in name:
__A = name.replace("bn" , "batch_norm" )
if "head" in name:
__A = name.replace("head" , "head.head" )
if "encoder.norm" in name:
__A = name.replace("encoder.norm" , "layernorm" )
if "auxlayer" in name:
__A = name.replace("auxlayer" , "auxiliary_head.head" )
if "backbone" in name:
__A = name.replace("backbone" , "backbone.bit.encoder" )
if ".." in name:
__A = name.replace(".." , "." )
if "stem.conv" in name:
__A = name.replace("stem.conv" , "bit.embedder.convolution" )
if "blocks" in name:
__A = name.replace("blocks" , "layers" )
if "convolution" in name and "backbone" in name:
__A = name.replace("convolution" , "conv" )
if "layer" in name and "backbone" in name:
__A = name.replace("layer" , "layers" )
if "backbone.bit.encoder.bit" in name:
__A = name.replace("backbone.bit.encoder.bit" , "backbone.bit" )
if "embedder.conv" in name:
__A = name.replace("embedder.conv" , "embedder.convolution" )
if "backbone.bit.encoder.stem.norm" in name:
__A = name.replace("backbone.bit.encoder.stem.norm" , "backbone.bit.embedder.norm" )
return name
def read_in_q_k_v(state_dict, config):
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"""dpt.encoder.layer.{i}.attn.qkv.weight""")
        in_proj_bias = state_dict.pop(f"""dpt.encoder.layer.{i}.attn.qkv.bias""")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"""dpt.encoder.layer.{i}.attention.attention.query.weight"""] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"""dpt.encoder.layer.{i}.attention.attention.query.bias"""] = in_proj_bias[: config.hidden_size]
        state_dict[f"""dpt.encoder.layer.{i}.attention.attention.key.weight"""] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"""dpt.encoder.layer.{i}.attention.attention.key.bias"""] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"""dpt.encoder.layer.{i}.attention.attention.value.weight"""] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"""dpt.encoder.layer.{i}.attention.attention.value.bias"""] = in_proj_bias[-config.hidden_size :]
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_dpt_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub, model_name, show_prediction):
    config, expected_shape = get_dpt_config(checkpoint_url)
    # load original state_dict from URL
    # state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    state_dict = torch.load(checkpoint_url, map_location="cpu")
    # remove certain keys
    remove_ignore_keys_(state_dict)
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val
    # read in qkv matrices
    read_in_q_k_v(state_dict, config)

    # load HuggingFace model
    model = DPTForSemanticSegmentation(config) if "ade" in checkpoint_url else DPTForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()

    # Check outputs on an image
    size = 480 if "ade" in checkpoint_url else 384
    image_processor = DPTImageProcessor(size=size)

    image = prepare_img()
    encoding = image_processor(image, return_tensors="pt")

    # forward pass
    outputs = model(**encoding).logits if "ade" in checkpoint_url else model(**encoding).predicted_depth

    if show_prediction:
        prediction = (
            torch.nn.functional.interpolate(
                outputs.unsqueeze(1), size=(image.size[1], image.size[0]), mode="bicubic", align_corners=False
            )
            .squeeze()
            .cpu()
            .numpy()
        )

        Image.fromarray((prediction / prediction.max()) * 255).show()

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"""Saving model to {pytorch_dump_folder_path}""")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"""Saving image processor to {pytorch_dump_folder_path}""")
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model.push_to_hub("ybelkada/dpt-hybrid-midas")
        image_processor.push_to_hub("ybelkada/dpt-hybrid-midas")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt''',
type=str,
help='''URL of the original DPT checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
required=False,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
)
parser.add_argument(
'''--model_name''',
default='''dpt-large''',
type=str,
help='''Name of the model, in case you\'re pushing to the hub.''',
)
parser.add_argument(
'''--show_prediction''',
action='''store_true''',
)
    args = parser.parse_args()
convert_dpt_checkpoint(
args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name, args.show_prediction
)
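# Example invocation (added for illustration; the script filename and local
# output path are assumptions):
#
#   python convert_dpt_hybrid_to_pytorch.py \
#       --checkpoint_url https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt \
#       --pytorch_dump_folder_path ./dpt-large \
#       --show_prediction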
| 721 |
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def get_tfds(
    train_file: str = None,
    eval_file: str = None,
    test_file: str = None,
    tokenizer: PreTrainedTokenizer = None,
    label_column_id: int = 0,
    max_seq_length: Optional[int] = None,
):
    files = {}

    if train_file is not None:
        files[datasets.Split.TRAIN] = [train_file]
    if eval_file is not None:
        files[datasets.Split.VALIDATION] = [eval_file]
    if test_file is not None:
        files[datasets.Split.TEST] = [test_file]

    ds = datasets.load_dataset("csv", data_files=files)
    features_name = list(ds[list(files.keys())[0]].features.keys())
    label_name = features_name.pop(label_column_id)
    label_list = list(set(ds[list(files.keys())[0]][label_name]))
    label2id = {label: i for i, label in enumerate(label_list)}
    input_names = tokenizer.model_input_names
    transformed_ds = {}

    if len(features_name) == 1:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    example[features_name[0]], truncation=True, max_length=max_seq_length, padding="max_length"
                ),
                batched=True,
            )
    elif len(features_name) == 2:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    (example[features_name[0]], example[features_name[1]]),
                    truncation=True,
                    max_length=max_seq_length,
                    padding="max_length",
                ),
                batched=True,
            )

    def gen_train():
        for ex in transformed_ds[datasets.Split.TRAIN]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_val():
        for ex in transformed_ds[datasets.Split.VALIDATION]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_test():
        for ex in transformed_ds[datasets.Split.TEST]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    train_ds = (
        tf.data.Dataset.from_generator(
            gen_train,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TRAIN in transformed_ds
        else None
    )

    if train_ds is not None:
        train_ds = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN])))

    val_ds = (
        tf.data.Dataset.from_generator(
            gen_val,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.VALIDATION in transformed_ds
        else None
    )

    if val_ds is not None:
        val_ds = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION])))

    test_ds = (
        tf.data.Dataset.from_generator(
            gen_test,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TEST in transformed_ds
        else None
    )

    if test_ds is not None:
        test_ds = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST])))

    return train_ds, val_ds, test_ds, label2id
logger = logging.getLogger(__name__)


@dataclass
class DataTrainingArguments:
    label_column_id: int = field(metadata={"help": "Which column contains the label"})
    train_file: str = field(default=None, metadata={"help": "The path of the training file"})
    dev_file: Optional[str] = field(default=None, metadata={"help": "The path of the development file"})
    test_file: Optional[str] = field(default=None, metadata={"help": "The path of the test file"})
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )


@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    use_fast: bool = field(default=False, metadata={"help": "Set this flag to use fast tokenization."})
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}
    )
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
            " --overwrite_output_dir to overcome."
        )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO,
    )
    logger.info(
        f"""n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1)}, """
        f"""16-bits training: {training_args.fp16}"""
    )
    logger.info(f"""Training/evaluation parameters {training_args}""")

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )
    train_dataset, eval_dataset, test_ds, label2id = get_tfds(
        train_file=data_args.train_file,
        eval_file=data_args.dev_file,
        test_file=data_args.test_file,
        tokenizer=tokenizer,
        label_column_id=data_args.label_column_id,
        max_seq_length=data_args.max_seq_length,
    )
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=len(label2id),
        label2id=label2id,
        id2label={id: label for label, id in label2id.items()},
        finetuning_task="text-classification",
        cache_dir=model_args.cache_dir,
    )

    with training_args.strategy.scope():
        model = TFAutoModelForSequenceClassification.from_pretrained(
            model_args.model_name_or_path,
            from_pt=bool(".bin" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
        )

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": (preds == p.label_ids).mean()}

    # Initialize our Trainer
    trainer = TFTrainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
    )

    # Training
    if training_args.do_train:
        trainer.train()
        trainer.save_model()
        tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")

        with open(output_eval_file, "w") as writer:
            logger.info("***** Eval results *****")

            for key, value in result.items():
                logger.info(f"""  {key} = {value}""")
                writer.write(f"""{key} = {value}\n""")

            results.update(result)

    return results
if __name__ == "__main__":
main()
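# Example invocation (added for illustration; file names are placeholders):
#
#   python run_tf_text_classification.py \
#       --model_name_or_path bert-base-uncased \
#       --train_file train.csv --dev_file dev.csv \
#       --label_column_id 0 \
#       --output_dir ./model_output --do_train --do_eval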
| 205 | 0 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import FunnelConfig, is_tf_available
from transformers.testing_utils import require_tf
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
)
class TFFunnelModelTester:
def __init__( self : Optional[Any] , __magic_name__ : Optional[int] , __magic_name__ : List[str]=13 , __magic_name__ : List[Any]=7 , __magic_name__ : str=True , __magic_name__ : Union[str, Any]=True , __magic_name__ : Optional[Any]=True , __magic_name__ : Optional[Any]=True , __magic_name__ : Any=99 , __magic_name__ : List[Any]=[1, 1, 2] , __magic_name__ : Optional[Any]=1 , __magic_name__ : str=32 , __magic_name__ : Optional[Any]=4 , __magic_name__ : Tuple=8 , __magic_name__ : int=37 , __magic_name__ : str="gelu_new" , __magic_name__ : Any=0.1 , __magic_name__ : List[Any]=0.1 , __magic_name__ : Optional[Any]=0.0 , __magic_name__ : List[Any]=5_12 , __magic_name__ : Any=3 , __magic_name__ : Any=0.02 , __magic_name__ : Tuple=3 , __magic_name__ : Optional[int]=4 , __magic_name__ : Any=None , __magic_name__ : int=False , ) -> Union[str, Any]:
"""simple docstring"""
__snake_case : Any = parent
__snake_case : Optional[int] = batch_size
__snake_case : List[str] = seq_length
__snake_case : Optional[int] = is_training
__snake_case : List[str] = use_input_mask
__snake_case : Union[str, Any] = use_token_type_ids
__snake_case : Optional[int] = use_labels
__snake_case : Any = vocab_size
__snake_case : Union[str, Any] = block_sizes
__snake_case : List[str] = num_decoder_layers
__snake_case : List[str] = d_model
__snake_case : List[Any] = n_head
__snake_case : Tuple = d_head
__snake_case : Union[str, Any] = d_inner
__snake_case : Union[str, Any] = hidden_act
__snake_case : Optional[Any] = hidden_dropout
__snake_case : Union[str, Any] = attention_dropout
__snake_case : Any = activation_dropout
__snake_case : List[str] = max_position_embeddings
__snake_case : Any = type_vocab_size
__snake_case : Tuple = 2
__snake_case : int = num_labels
__snake_case : List[Any] = num_choices
__snake_case : Any = scope
__snake_case : str = initializer_std
# Used in the tests to check the size of the first attention layer
__snake_case : Any = n_head
# Used in the tests to check the size of the first hidden state
__snake_case : Tuple = self.d_model
# Used in the tests to check the number of output hidden states/attentions
__snake_case : int = sum(self.block_sizes ) + (0 if base else self.num_decoder_layers)
# FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with
# the last hidden state of the first block (which is the first hidden state of the decoder).
if not base:
__snake_case : Dict = self.num_hidden_layers + 2
def lowercase__ ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
__snake_case : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__snake_case : str = None
if self.use_input_mask:
__snake_case : int = random_attention_mask([self.batch_size, self.seq_length] )
__snake_case : Optional[int] = None
if self.use_token_type_ids:
__snake_case : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__snake_case : Union[str, Any] = None
__snake_case : Optional[int] = None
__snake_case : Tuple = None
if self.use_labels:
__snake_case : int = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__snake_case : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__snake_case : int = ids_tensor([self.batch_size] , self.num_choices )
__snake_case : List[Any] = FunnelConfig(
vocab_size=self.vocab_size , block_sizes=self.block_sizes , num_decoder_layers=self.num_decoder_layers , d_model=self.d_model , n_head=self.n_head , d_head=self.d_head , d_inner=self.d_inner , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , activation_dropout=self.activation_dropout , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_std=self.initializer_std , )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
)
def lowercase__ ( self : Any , __magic_name__ : Tuple , __magic_name__ : Any , __magic_name__ : Any , __magic_name__ : str , __magic_name__ : Dict , __magic_name__ : str , __magic_name__ : List[Any] , ) -> Optional[Any]:
"""simple docstring"""
__snake_case : Dict = TFFunnelModel(config=__magic_name__ )
__snake_case : List[Any] = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
__snake_case : Optional[Any] = model(__magic_name__ )
__snake_case : Tuple = [input_ids, input_mask]
__snake_case : Tuple = model(__magic_name__ )
__snake_case : Optional[Any] = model(__magic_name__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
__snake_case : str = False
__snake_case : Dict = TFFunnelModel(config=__magic_name__ )
__snake_case : str = model(__magic_name__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
__snake_case : List[Any] = False
__snake_case : Any = TFFunnelModel(config=__magic_name__ )
__snake_case : List[str] = model(__magic_name__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
def lowercase__ ( self : Any , __magic_name__ : Dict , __magic_name__ : Dict , __magic_name__ : Union[str, Any] , __magic_name__ : Any , __magic_name__ : List[Any] , __magic_name__ : List[Any] , __magic_name__ : Dict , ) -> List[Any]:
"""simple docstring"""
__snake_case : str = TFFunnelBaseModel(config=__magic_name__ )
__snake_case : Tuple = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
__snake_case : Any = model(__magic_name__ )
__snake_case : List[Any] = [input_ids, input_mask]
__snake_case : Dict = model(__magic_name__ )
__snake_case : Optional[int] = model(__magic_name__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) )
__snake_case : str = False
__snake_case : List[str] = TFFunnelBaseModel(config=__magic_name__ )
__snake_case : Optional[int] = model(__magic_name__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 3, self.d_model) )
__snake_case : Tuple = False
__snake_case : Optional[int] = TFFunnelBaseModel(config=__magic_name__ )
__snake_case : Dict = model(__magic_name__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) )
def lowercase__ ( self : int , __magic_name__ : List[Any] , __magic_name__ : Any , __magic_name__ : Tuple , __magic_name__ : Optional[int] , __magic_name__ : List[str] , __magic_name__ : str , __magic_name__ : List[Any] , ) -> Optional[Any]:
"""simple docstring"""
__snake_case : str = TFFunnelForPreTraining(config=__magic_name__ )
__snake_case : int = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
__snake_case : Tuple = model(__magic_name__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length) )
def lowercase__ ( self : int , __magic_name__ : List[str] , __magic_name__ : Any , __magic_name__ : int , __magic_name__ : List[Any] , __magic_name__ : Tuple , __magic_name__ : Union[str, Any] , __magic_name__ : List[Any] , ) -> Dict:
"""simple docstring"""
__snake_case : Optional[Any] = TFFunnelForMaskedLM(config=__magic_name__ )
__snake_case : Any = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
__snake_case : str = model(__magic_name__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowercase__ ( self : List[str] , __magic_name__ : int , __magic_name__ : Optional[int] , __magic_name__ : Union[str, Any] , __magic_name__ : Tuple , __magic_name__ : List[Any] , __magic_name__ : Optional[Any] , __magic_name__ : Tuple , ) -> Dict:
"""simple docstring"""
__snake_case : Union[str, Any] = self.num_labels
__snake_case : str = TFFunnelForSequenceClassification(config=__magic_name__ )
__snake_case : Tuple = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
__snake_case : Union[str, Any] = model(__magic_name__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowercase__ ( self : Optional[int] , __magic_name__ : Optional[int] , __magic_name__ : List[str] , __magic_name__ : str , __magic_name__ : List[Any] , __magic_name__ : Optional[int] , __magic_name__ : Any , __magic_name__ : int , ) -> List[Any]:
"""simple docstring"""
__snake_case : List[Any] = self.num_choices
__snake_case : List[str] = TFFunnelForMultipleChoice(config=__magic_name__ )
__snake_case : str = tf.tile(tf.expand_dims(__magic_name__ , 1 ) , (1, self.num_choices, 1) )
__snake_case : Tuple = tf.tile(tf.expand_dims(__magic_name__ , 1 ) , (1, self.num_choices, 1) )
__snake_case : int = tf.tile(tf.expand_dims(__magic_name__ , 1 ) , (1, self.num_choices, 1) )
__snake_case : Dict = {
"""input_ids""": multiple_choice_inputs_ids,
"""attention_mask""": multiple_choice_input_mask,
"""token_type_ids""": multiple_choice_token_type_ids,
}
__snake_case : Dict = model(__magic_name__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowercase__ ( self : Optional[int] , __magic_name__ : str , __magic_name__ : Optional[int] , __magic_name__ : Tuple , __magic_name__ : str , __magic_name__ : Tuple , __magic_name__ : Union[str, Any] , __magic_name__ : str , ) -> Any:
"""simple docstring"""
__snake_case : Optional[int] = self.num_labels
__snake_case : List[str] = TFFunnelForTokenClassification(config=__magic_name__ )
__snake_case : str = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
__snake_case : Optional[Any] = model(__magic_name__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowercase__ ( self : Union[str, Any] , __magic_name__ : List[Any] , __magic_name__ : int , __magic_name__ : Dict , __magic_name__ : List[str] , __magic_name__ : List[str] , __magic_name__ : Tuple , __magic_name__ : Optional[Any] , ) -> Union[str, Any]:
"""simple docstring"""
__snake_case : int = TFFunnelForQuestionAnswering(config=__magic_name__ )
__snake_case : Union[str, Any] = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
__snake_case : Any = model(__magic_name__ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def prepare_config_and_inputs_for_common(self):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFFunnelModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
lowercase__: int = (
(
TFFunnelModel,
TFFunnelForMaskedLM,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForTokenClassification,
)
if is_tf_available()
else ()
)
lowercase__: List[Any] = (
{
'''feature-extraction''': (TFFunnelBaseModel, TFFunnelModel),
'''fill-mask''': TFFunnelForMaskedLM,
'''question-answering''': TFFunnelForQuestionAnswering,
'''text-classification''': TFFunnelForSequenceClassification,
'''token-classification''': TFFunnelForTokenClassification,
'''zero-shot''': TFFunnelForSequenceClassification,
}
if is_tf_available()
else {}
)
lowercase__: List[Any] = False
lowercase__: str = False
def lowercase__ ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
__snake_case : Dict = TFFunnelModelTester(self )
__snake_case : Tuple = ConfigTester(self , config_class=__magic_name__ )
def lowercase__ ( self : List[str] ) -> List[str]:
"""simple docstring"""
self.config_tester.run_common_tests()
def lowercase__ ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
__snake_case : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__magic_name__ )
def lowercase__ ( self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
__snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*__magic_name__ )
def lowercase__ ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
__snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__magic_name__ )
def lowercase__ ( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
__snake_case : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__magic_name__ )
def lowercase__ ( self : Optional[int] ) -> List[Any]:
"""simple docstring"""
__snake_case : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__magic_name__ )
@require_tf
class TFFunnelBaseModelTest(TFModelTesterMixin, unittest.TestCase):
lowercase__: Dict = (
(TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else ()
)
lowercase__: List[str] = False
lowercase__: List[str] = False
def lowercase__ ( self : Tuple ) -> Dict:
"""simple docstring"""
__snake_case : Any = TFFunnelModelTester(self , base=__magic_name__ )
__snake_case : List[Any] = ConfigTester(self , config_class=__magic_name__ )
def lowercase__ ( self : str ) -> List[str]:
"""simple docstring"""
self.config_tester.run_common_tests()
def lowercase__ ( self : int ) -> Tuple:
"""simple docstring"""
__snake_case : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_base_model(*__magic_name__ )
def lowercase__ ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
__snake_case : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__magic_name__ )
def lowercase__ ( self : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
__snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*__magic_name__ )
| 26 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"""configuration_biogpt""": ["""BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """BioGptConfig"""],
"""tokenization_biogpt""": ["""BioGptTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_biogpt"] = [
"""BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BioGptForCausalLM""",
"""BioGptForTokenClassification""",
"""BioGptForSequenceClassification""",
"""BioGptModel""",
"""BioGptPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
from .tokenization_biogpt import BioGptTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_biogpt import (
BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
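    # Usage note (added): with the lazy module installed in sys.modules, heavy
    # submodules are imported only on first attribute access, e.g.
    #
    #   from transformers.models.biogpt import BioGptConfig  # triggers the actual import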
| 615 | 0 |
import unittest
from transformers import GPTSwaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_with_bytefallback.model")


@require_sentencepiece
@require_tokenizers
class GPTSwaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = GPTSwaTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    test_sentencepiece_ignore_case = False

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = GPTSwaTokenizer(SAMPLE_VOCAB, eos_token="<unk>", bos_token="<unk>", pad_token="<unk>")

        tokenizer.save_pretrained(self.tmpdirname)

    def get_input_output_texts(self, tokenizer):
        input_text = "This is a test"
        output_text = "This is a test"

        return input_text, output_text

    def test_convert_token_and_id(self):
        token = "<s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "j")
        self.assertEqual(len(vocab_keys), 2000)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 2000)

    def test_complete_tokenizer(self):
        tokenizer = GPTSwaTokenizer(SAMPLE_VOCAB)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [465, 287, 265, 631, 842])

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        # fmt: off
        self.assertListEqual(
            tokens, ["▁I", "▁was", "▁bor", "n", "▁in", "▁", "<0x39>", "2", "0", "0", "0", ",", "▁and", "▁this", "▁is", "▁f", "al", "s", "<0xC3>", "<0xA9>", "."],
        )
        # fmt: on

        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids, [262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        # fmt: off
        self.assertListEqual(
            back_tokens, ["▁I", "▁was", "▁bor", "n", "▁in", "▁", "<0x39>", "2", "0", "0", "0", ",", "▁and", "▁this", "▁is", "▁f", "al", "s", "<0xC3>", "<0xA9>", "."]
        )
        # fmt: on

    def test_fast_encode_decode(self):
        tokenizer = GPTSwaTokenizer(SAMPLE_VOCAB)
        texts = ["This is a test", "I was born in 92000, and this is falsé."]
        expected_ids_list = [
            [465, 287, 265, 631, 842],
            [262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260],
        ]

        # Test that encode_fast returns the same as tokenize + convert_tokens_to_ids
        for text, expected_ids in zip(texts, expected_ids_list):
            self.assertListEqual(tokenizer.encode_fast(text), expected_ids)

        # Test that decode_fast returns the input text
        for text, token_ids in zip(texts, expected_ids_list):
            self.assertEqual(tokenizer.decode_fast(token_ids), text)
@slow
def UpperCAmelCase(self : int ) -> Any:
snake_case = [
"<|python|>def fibonacci(n)\n if n < 0:\n print('Incorrect input')",
"Hey there, how are you doing this fine day?",
"This is a text with a trailing spaces followed by a dot .",
"Häj sväjs lillebrör! =)",
"Det är inget fel på Mr. Cool",
]
# fmt: off
snake_case = {"input_ids": [[6_3_4_2_3, 5, 6_8_1_1, 1_4_9_5_4, 2_8_2, 8_1_6, 3_8_2_1, 6_3_4_6_6, 6_3_4_2_5, 6_3_4_6_2, 1_8, 6_3_9_7_8, 6_7_8, 3_0_1, 1_3_2_0, 6_3_4_2_3, 6_3_4_5_5, 6_3_4_5_8, 1_8, 6_3_9_8_2, 4_2_4_6, 3_9_4_0, 1_9_0_1, 4_7_7_8_9, 5_5_4_7, 1_8_9_9_4], [1_9_6_3_0, 1_1_0_0, 6_3_4_4_6, 1_3_4_2, 6_3_3, 5_4_4, 4_4_8_8, 5_9_3, 5_1_0_2, 2_4_1_6, 6_3_4_9_5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1_6_5_2, 4_2_8, 2_6_8, 1_9_3_6, 5_1_5, 2_6_8, 5_8_5_9_3, 2_2_4_1_3, 9_1_0_6, 5_4_6, 2_6_8, 3_3_2_1_3, 6_3_9_7_9, 6_9_8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_5_1_3_0, 6_3_4_5_0, 9_2_4, 6_3_4_4_9, 2_2_4_9, 4_0_6_2, 1_5_5_8, 3_1_8, 6_3_5_0_4, 2_1_4_9_8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_0_9, 3_7_7, 2_8_2_7, 2_5_5_9, 3_3_2, 6_5_7_5, 6_3_4_4_3, 2_6_8_0_1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__lowerCamelCase , model_name="AI-Sweden/gpt-sw3-126m" , sequences=__lowerCamelCase , )
| 717 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_A = logging.get_logger(__name__)
_A = {
"sail/poolformer_s12": "https://huggingface.co/sail/poolformer_s12/resolve/main/config.json",
# See all PoolFormer models at https://huggingface.co/models?filter=poolformer
}
class PoolFormerConfig(PretrainedConfig):
    model_type = "poolformer"

    def __init__(
        self,
        num_channels=3,
        patch_size=16,
        stride=16,
        pool_size=3,
        mlp_ratio=4.0,
        depths=[2, 2, 6, 2],
        hidden_sizes=[64, 128, 320, 512],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        padding=[2, 1, 1, 1],
        num_encoder_blocks=4,
        drop_path_rate=0.0,
        hidden_act="gelu",
        use_layer_scale=True,
        layer_scale_init_value=1e-5,
        initializer_range=0.02,
        **kwargs,
    ):
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.stride = stride
        self.padding = padding
        self.pool_size = pool_size
        self.hidden_sizes = hidden_sizes
        self.mlp_ratio = mlp_ratio
        self.depths = depths
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.num_encoder_blocks = num_encoder_blocks
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_layer_scale = use_layer_scale
        self.layer_scale_init_value = layer_scale_init_value
        self.initializer_range = initializer_range
        super().__init__(**kwargs)


class PoolFormerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 2e-3
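# Usage sketch (hypothetical values; the ONNX config only declares the model's symbolic inputs):
#   config = PoolFormerConfig()
#   onnx_config = PoolFormerOnnxConfig(config)
#   list(onnx_config.inputs)  # -> ["pixel_values"]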
| 294 | 0 |
"""simple docstring"""
def _lowercase ( __snake_case ,__snake_case ) -> int:
return int((input_a, input_a).count(1 ) != 0 )
def _lowercase ( ) -> None:
assert or_gate(0 ,0 ) == 0
assert or_gate(0 ,1 ) == 1
assert or_gate(1 ,0 ) == 1
assert or_gate(1 ,1 ) == 1
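
# Composition sketch: the two-input gate folds naturally into an n-input OR.
# (This helper is illustrative only; it is not part of the original module.)
def or_gate_n(*inputs: int) -> int:
    from functools import reduce

    return reduce(or_gate, inputs, 0)  # or_gate_n(0, 0, 1) -> 1, or_gate_n(0, 0) -> 0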
if __name__ == "__main__":
print(or_gate(0, 1))
print(or_gate(1, 0))
print(or_gate(0, 0))
print(or_gate(1, 1)) | 293 |
"""simple docstring"""
import argparse
from collections import defaultdict
def overwrite_file(file, class_name, test_name, correct_line, done_test):
    _id = f"{file}_{class_name}_{test_name}"
    done_test[_id] += 1
    with open(file, "r") as f:
        lines = f.readlines()

    class_regex = f"class {class_name}("
    func_regex = f"{4 * ' '}def {test_name}("
    line_begin_regex = f"{8 * ' '}{correct_line.split()[0]}"
    another_line_begin_regex = f"{16 * ' '}{correct_line.split()[0]}"
    in_class = False
    in_func = False
    in_line = False
    insert_line = False
    count = 0
    spaces = 0
    new_lines = []
    for line in lines:
        if line.startswith(class_regex):
            in_class = True
        elif in_class and line.startswith(func_regex):
            in_func = True
        elif in_class and in_func and (line.startswith(line_begin_regex) or line.startswith(another_line_begin_regex)):
            spaces = len(line.split(correct_line.split()[0])[0])
            count += 1

            if count == done_test[_id]:
                in_line = True

        if in_class and in_func and in_line:
            if ")" not in line:
                continue
            else:
                insert_line = True

        if in_class and in_func and in_line and insert_line:
            new_lines.append(f"{spaces * ' '}{correct_line}")
            in_class = in_func = in_line = insert_line = False
        else:
            new_lines.append(line)

    with open(file, "w") as f:
        for line in new_lines:
            f.write(line)


def main(correct, fail=None):
    if fail is not None:
        with open(fail, "r") as f:
            test_failures = {l.strip() for l in f.readlines()}
    else:
        test_failures = None

    with open(correct, "r") as f:
        correct_lines = f.readlines()

    done_tests = defaultdict(int)
    for line in correct_lines:
        file, class_name, test_name, correct_line = line.split(";")
        if test_failures is None or "::".join([file, class_name, test_name]) in test_failures:
            overwrite_file(file, class_name, test_name, correct_line, done_tests)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--correct_filename", help="filename of tests with expected result")
    parser.add_argument("--fail_filename", help="filename of test failures", type=str, default=None)
    args = parser.parse_args()

    main(args.correct_filename, args.fail_filename)
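# Input format sketch for --correct_filename (semicolon-separated; the path and names below
# are hypothetical). Each line names the file, test class, test method and the corrected line:
#   tests/models/bert/test_modeling_bert.py;BertModelIntegrationTest;test_inference_no_head;expected_slice = torch.tensor([...])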
"""simple docstring"""
def lowerCAmelCase_( lowercase_ : list[int] ) -> float:
if not nums: # Makes sure that the list is not empty
raise ValueError('''List is empty''' )
_lowerCamelCase = sum(lowercase_ ) / len(lowercase_ ) # Calculate the average
return sum(abs(x - average ) for x in nums ) / len(lowercase_ )
if __name__ == "__main__":
import doctest
doctest.testmod()
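    # Worked example: the mean of [1, 2, 3, 4] is 2.5, so the absolute deviations
    # (1.5, 0.5, 0.5, 1.5) average to 1.0.
    print(average_absolute_deviation([1, 2, 3, 4]))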
| 708 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'''configuration_xlm''': ['''XLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XLMConfig''', '''XLMOnnxConfig'''],
'''tokenization_xlm''': ['''XLMTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlm"] = [
'''XLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XLMForMultipleChoice''',
'''XLMForQuestionAnswering''',
'''XLMForQuestionAnsweringSimple''',
'''XLMForSequenceClassification''',
'''XLMForTokenClassification''',
'''XLMModel''',
'''XLMPreTrainedModel''',
'''XLMWithLMHeadModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xlm"] = [
'''TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXLMForMultipleChoice''',
'''TFXLMForQuestionAnsweringSimple''',
'''TFXLMForSequenceClassification''',
'''TFXLMForTokenClassification''',
'''TFXLMMainLayer''',
'''TFXLMModel''',
'''TFXLMPreTrainedModel''',
'''TFXLMWithLMHeadModel''',
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
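# With the lazy module installed above, `from transformers.models.xlm import XLMModel` only
# imports the heavy torch/TF submodules on first attribute access rather than at package import.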
| 623 | 0 |
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer
logger = logging.get_logger(__name__)
UpperCamelCase_ = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"
),
},
}
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/dpr-ctx_encoder-single-nq-base": 5_1_2,
"facebook/dpr-ctx_encoder-multiset-base": 5_1_2,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/dpr-question_encoder-single-nq-base": 5_1_2,
"facebook/dpr-question_encoder-multiset-base": 5_1_2,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/dpr-reader-single-nq-base": 5_1_2,
"facebook/dpr-reader-multiset-base": 5_1_2,
}
CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
"facebook/dpr-ctx_encoder-single-nq-base": {"do_lower_case": True},
"facebook/dpr-ctx_encoder-multiset-base": {"do_lower_case": True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
"facebook/dpr-question_encoder-single-nq-base": {"do_lower_case": True},
"facebook/dpr-question_encoder-multiset-base": {"do_lower_case": True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
"facebook/dpr-reader-single-nq-base": {"do_lower_case": True},
"facebook/dpr-reader-multiset-base": {"do_lower_case": True},
}
class DPRContextEncoderTokenizerFast(BertTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRContextEncoderTokenizer
class DPRQuestionEncoderTokenizerFast(BertTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRQuestionEncoderTokenizer
DPRSpanPrediction = collections.namedtuple(
    "DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)

DPRReaderOutput = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])
UpperCamelCase_ = R"\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. 
Acceptable values are:\n\n - `'tf'`: Return TensorFlow `tf.constant` objects.\n - `'pt'`: Return PyTorch `torch.Tensor` objects.\n - `'np'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. If not set, will return the attention mask according to the\n specific tokenizer's default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Return:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n "
@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class CustomDPRReaderTokenizerMixin:
    def __call__(
        self,
        questions,
        titles: Optional[str] = None,
        texts: Optional[str] = None,
        padding: Union[bool, str] = False,
        truncation: Union[bool, str] = False,
        max_length: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = None,
        **kwargs,
    ) -> BatchEncoding:
        if titles is None and texts is None:
            return super().__call__(
                questions,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        elif titles is None or texts is None:
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions,
                text_pair,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        titles = titles if not isinstance(titles, str) else [titles]
        texts = texts if not isinstance(texts, str) else [texts]
        n_passages = len(titles)
        questions = questions if not isinstance(questions, str) else [questions] * n_passages
        assert len(titles) == len(
            texts
        ), f"There should be as many titles as texts but got {len(titles)} titles and {len(texts)} texts."
        encoded_question_and_titles = super().__call__(questions, titles, padding=False, truncation=False)["input_ids"]
        encoded_texts = super().__call__(texts, add_special_tokens=False, padding=False, truncation=False)["input_ids"]
        encoded_inputs = {
            "input_ids": [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles, encoded_texts)
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
            encoded_inputs["attention_mask"] = attention_mask
        return self.pad(encoded_inputs, padding=padding, max_length=max_length, return_tensors=return_tensors)
    def decode_best_spans(
        self,
        reader_input: BatchEncoding,
        reader_output: "DPRReaderOutput",
        num_spans: int = 16,
        max_answer_length: int = 64,
        num_spans_per_passage: int = 4,
    ) -> List[DPRSpanPrediction]:
        input_ids = reader_input["input_ids"]
        start_logits, end_logits, relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits)
        sorted_docs = sorted(range(n_passages), reverse=True, key=relevance_logits.__getitem__)
        nbest_spans_predictions: List[DPRSpanPrediction] = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id])
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id, 2) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id)
            else:
                sequence_len = len(sequence_ids)

            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len],
                end_logits=end_logits[doc_id][passage_offset:sequence_len],
                max_answer_length=max_answer_length,
                top_spans=num_spans_per_passage,
            )
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index],
                        relevance_score=relevance_logits[doc_id],
                        doc_id=doc_id,
                        start_index=start_index,
                        end_index=end_index,
                        text=self.decode(sequence_ids[start_index : end_index + 1]),
                    )
                )
            if len(nbest_spans_predictions) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]
    def _get_best_spans(
        self,
        start_logits: List[int],
        end_logits: List[int],
        max_answer_length: int,
        top_spans: int,
    ) -> List[DPRSpanPrediction]:
        scores = []
        for start_index, start_score in enumerate(start_logits):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
                scores.append(((start_index, start_index + answer_length), start_score + end_score))
        scores = sorted(scores, key=lambda x: x[1], reverse=True)
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            assert start_index <= end_index, f"Wrong span indices: [{start_index}:{end_index}]"
            length = end_index - start_index + 1
            assert length <= max_answer_length, f"Span is too long: {length} > {max_answer_length}"
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals
            ):
                continue
            chosen_span_intervals.append((start_index, end_index))

            if len(chosen_span_intervals) == top_spans:
                break
        return chosen_span_intervals
@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class DPRReaderTokenizerFast(CustomDPRReaderTokenizerMixin, BertTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = DPRReaderTokenizer
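# Usage sketch (checkpoint name from the maps above; the reader outputs are hypothetical here):
#   tokenizer = DPRReaderTokenizerFast.from_pretrained("facebook/dpr-reader-single-nq-base")
#   encoded = tokenizer(questions="What is love?", titles="Haddaway", texts="...", return_tensors="pt")
#   # spans = tokenizer.decode_best_spans(encoded, model(**encoded))  # given a DPRReader model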
| 28 |
"""simple docstring"""
# Lint as: python3
import itertools
import os
import re
_uppercase_uppercase_re = re.compile(r"([A-Z]+)([A-Z][a-z])")
_lowercase_uppercase_re = re.compile(r"([a-z\d])([A-Z])")

_single_underscore_re = re.compile(r"(?<!_)_(?!_)")
_multiple_underscores_re = re.compile(r"(_{2,})")

_split_re = r"^\w+(\.\w+)*$"

INVALID_WINDOWS_CHARACTERS_IN_PATH = r"<>:/\|?*"


def camelcase_to_snakecase(name):
    """Convert camel-case string to snake-case."""
    name = _uppercase_uppercase_re.sub(r"\1_\2", name)
    name = _lowercase_uppercase_re.sub(r"\1_\2", name)
    return name.lower()


def snakecase_to_camelcase(name):
    """Convert snake-case string to camel-case string."""
    name = _single_underscore_re.split(name)
    name = [_multiple_underscores_re.split(n) for n in name]
    return "".join(n.capitalize() for n in itertools.chain.from_iterable(name) if n != "")


def filename_prefix_for_name(name):
    if os.path.basename(name) != name:
        raise ValueError(f"Should be a dataset name, not a path: {name}")
    return camelcase_to_snakecase(name)


def filename_prefix_for_split(name, split):
    if os.path.basename(name) != name:
        raise ValueError(f"Should be a dataset name, not a path: {name}")
    if not re.match(_split_re, split):
        raise ValueError(f"Split name should match '{_split_re}' but got '{split}'.")
    return f"{filename_prefix_for_name(name)}-{split}"


def filepattern_for_dataset_split(path, dataset_name, split, filetype_suffix=None):
    prefix = filename_prefix_for_split(dataset_name, split)
    if filetype_suffix:
        prefix += f".{filetype_suffix}"
    filepath = os.path.join(path, prefix)
    return f"{filepath}*"


def filenames_for_dataset_split(path, dataset_name, split, filetype_suffix=None, shard_lengths=None):
    prefix = filename_prefix_for_split(dataset_name, split)
    prefix = os.path.join(path, prefix)

    if shard_lengths:
        num_shards = len(shard_lengths)
        filenames = [f"{prefix}-{shard_id:05d}-of-{num_shards:05d}" for shard_id in range(num_shards)]
        if filetype_suffix:
            filenames = [filename + f".{filetype_suffix}" for filename in filenames]
        return filenames
    else:
        filename = prefix
        if filetype_suffix:
            filename += f".{filetype_suffix}"
        return [filename]
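# Example (hypothetical dataset name and cache directory):
#   filename_prefix_for_split("squad", "train")  # -> "squad-train"
#   filenames_for_dataset_split("/cache", "squad", "train", filetype_suffix="arrow", shard_lengths=[100, 100])
#   # -> ['/cache/squad-train-00000-of-00002.arrow', '/cache/squad-train-00001-of-00002.arrow']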
| 389 | 0 |
"""simple docstring"""
import os
from itertools import chain
from random import randrange, shuffle
import pytest
from .sol1 import PokerHand
SORTED_HANDS = (
"4S 3H 2C 7S 5H",
"9D 8H 2C 6S 7H",
"2D 6D 9D TH 7D",
"TC 8C 2S JH 6C",
"JH 8S TH AH QH",
"TS KS 5S 9S AC",
"KD 6S 9D TH AD",
"KS 8D 4D 9S 4S", # pair
"8C 4S KH JS 4D", # pair
"QH 8H KD JH 8S", # pair
"KC 4H KS 2H 8D", # pair
"KD 4S KC 3H 8S", # pair
"AH 8S AS KC JH", # pair
"3H 4C 4H 3S 2H", # 2 pairs
"5S 5D 2C KH KH", # 2 pairs
"3C KH 5D 5S KH", # 2 pairs
"AS 3C KH AD KH", # 2 pairs
"7C 7S 3S 7H 5S", # 3 of a kind
"7C 7S KH 2H 7H", # 3 of a kind
"AC KH QH AH AS", # 3 of a kind
"2H 4D 3C AS 5S", # straight (low ace)
"3C 5C 4C 2C 6H", # straight
"6S 8S 7S 5H 9H", # straight
"JS QS 9H TS KH", # straight
"QC KH TS JS AH", # straight (high ace)
"8C 9C 5C 3C TC", # flush
"3S 8S 9S 5S KS", # flush
"4C 5C 9C 8C KC", # flush
"JH 8H AH KH QH", # flush
"3D 2H 3H 2C 2D", # full house
"2H 2C 3S 3H 3D", # full house
"KH KC 3S 3H 3D", # full house
"JC 6H JS JD JH", # 4 of a kind
"JC 7H JS JD JH", # 4 of a kind
"JC KH JS JD JH", # 4 of a kind
"2S AS 4S 5S 3S", # straight flush (low ace)
"2D 6D 3D 4D 5D", # straight flush
"5C 6C 3C 7C 4C", # straight flush
"JH 9H TH KH QH", # straight flush
"JH AH TH KH QH", # royal flush (high ace straight flush)
)
TEST_COMPARE = (
("2H 3H 4H 5H 6H", "KS AS TS QS JS", "Loss"),
("2H 3H 4H 5H 6H", "AS AD AC AH JD", "Win"),
("AS AH 2H AD AC", "JS JD JC JH 3D", "Win"),
("2S AH 2H AS AC", "JS JD JC JH AD", "Loss"),
("2S AH 2H AS AC", "2H 3H 5H 6H 7H", "Win"),
("AS 3S 4S 8S 2S", "2H 3H 5H 6H 7H", "Win"),
("2H 3H 5H 6H 7H", "2S 3H 4H 5S 6C", "Win"),
("2S 3H 4H 5S 6C", "3D 4C 5H 6H 2S", "Tie"),
("2S 3H 4H 5S 6C", "AH AC 5H 6H AS", "Win"),
("2S 2H 4H 5S 4C", "AH AC 5H 6H AS", "Loss"),
("2S 2H 4H 5S 4C", "AH AC 5H 6H 7S", "Win"),
("6S AD 7H 4S AS", "AH AC 5H 6H 7S", "Loss"),
("2S AH 4H 5S KC", "AH AC 5H 6H 7S", "Loss"),
("2S 3H 6H 7S 9C", "7H 3C TH 6H 9S", "Loss"),
("4S 5H 6H TS AC", "3S 5H 6H TS AC", "Win"),
("2S AH 4H 5S 6C", "AD 4C 5H 6H 2C", "Tie"),
("AS AH 3H AD AC", "AS AH 2H AD AC", "Win"),
("AH AC 5H 5C QS", "AH AC 5H 5C KS", "Loss"),
("AH AC 5H 5C QS", "KH KC 5H 5C QS", "Win"),
("7C 7S KH 2H 7H", "3C 3S AH 2H 3H", "Win"),
("3C 3S AH 2H 3H", "7C 7S KH 2H 7H", "Loss"),
("6H 5H 4H 3H 2H", "5H 4H 3H 2H AH", "Win"),
("5H 4H 3H 2H AH", "5H 4H 3H 2H AH", "Tie"),
("5H 4H 3H 2H AH", "6H 5H 4H 3H 2H", "Loss"),
("AH AD KS KC AC", "AH KD KH AC KC", "Win"),
("2H 4D 3C AS 5S", "2H 4D 3C 6S 5S", "Loss"),
("2H 3S 3C 3H 2S", "3S 3C 2S 2H 2D", "Win"),
("4D 6D 5D 2D JH", "3S 8S 3H TC KH", "Loss"),
("4S 6C 8S 3S 7S", "AD KS 2D 7D 7C", "Loss"),
("6S 4C 7H 8C 3H", "5H JC AH 9D 9C", "Loss"),
("9D 9H JH TC QH", "3C 2S JS 5C 7H", "Win"),
("2H TC 8S AD 9S", "4H TS 7H 2C 5C", "Win"),
("9D 3S 2C 7S 7C", "JC TD 3C TC 9H", "Loss"),
)
TEST_FLUSH = (
("2H 3H 4H 5H 6H", True),
("AS AH 2H AD AC", False),
("2H 3H 5H 6H 7H", True),
("KS AS TS QS JS", True),
("8H 9H QS JS TH", False),
("AS 3S 4S 8S 2S", True),
)
TEST_STRAIGHT = (
("2H 3H 4H 5H 6H", True),
("AS AH 2H AD AC", False),
("2H 3H 5H 6H 7H", False),
("KS AS TS QS JS", True),
("8H 9H QS JS TH", True),
)
TEST_FIVE_HIGH_STRAIGHT = (
("2H 4D 3C AS 5S", True, [5, 4, 3, 2, 14]),
("2H 5D 3C AS 5S", False, [14, 5, 5, 3, 2]),
("JH QD KC AS TS", False, [14, 13, 12, 11, 10]),
("9D 3S 2C 7S 7C", False, [9, 7, 7, 3, 2]),
)
TEST_KIND = (
("JH AH TH KH QH", 0),
("JH 9H TH KH QH", 0),
("JC KH JS JD JH", 7),
("KH KC 3S 3H 3D", 6),
("8C 9C 5C 3C TC", 0),
("JS QS 9H TS KH", 0),
("7C 7S KH 2H 7H", 3),
("3C KH 5D 5S KH", 2),
("QH 8H KD JH 8S", 1),
("2D 6D 9D TH 7D", 0),
)
TEST_TYPES = (
("JH AH TH KH QH", 23),
("JH 9H TH KH QH", 22),
("JC KH JS JD JH", 21),
("KH KC 3S 3H 3D", 20),
("8C 9C 5C 3C TC", 19),
("JS QS 9H TS KH", 18),
("7C 7S KH 2H 7H", 17),
("3C KH 5D 5S KH", 16),
("QH 8H KD JH 8S", 15),
("2D 6D 9D TH 7D", 14),
)
def generate_random_hand():
    """Generate a random hand, a random opposing hand and the expected comparison result."""
    play, oppo = randrange(len(SORTED_HANDS)), randrange(len(SORTED_HANDS))
    expected = ["Loss", "Tie", "Win"][(play >= oppo) + (play > oppo)]
    hand, other = SORTED_HANDS[play], SORTED_HANDS[oppo]
    return hand, other, expected


def generate_random_hands(number_of_hands: int = 100):
    """Generate a stream of random (hand, other, expected) triples."""
    return (generate_random_hand() for _ in range(number_of_hands))


@pytest.mark.parametrize("hand, expected", TEST_FLUSH)
def test_hand_is_flush(hand, expected):
    assert PokerHand(hand)._is_flush() == expected


@pytest.mark.parametrize("hand, expected", TEST_STRAIGHT)
def test_hand_is_straight(hand, expected):
    assert PokerHand(hand)._is_straight() == expected


@pytest.mark.parametrize("hand, expected, card_values", TEST_FIVE_HIGH_STRAIGHT)
def test_hand_is_five_high_straight(hand, expected, card_values):
    player = PokerHand(hand)
    assert player._is_five_high_straight() == expected
    assert player._card_values == card_values


@pytest.mark.parametrize("hand, expected", TEST_KIND)
def test_hand_is_same_kind(hand, expected):
    assert PokerHand(hand)._is_same_kind() == expected


@pytest.mark.parametrize("hand, expected", TEST_TYPES)
def test_hand_values(hand, expected):
    assert PokerHand(hand)._hand_type == expected


@pytest.mark.parametrize("hand, other, expected", TEST_COMPARE)
def test_compare_simple(hand, other, expected):
    assert PokerHand(hand).compare_with(PokerHand(other)) == expected


@pytest.mark.parametrize("hand, other, expected", generate_random_hands())
def test_compare_random(hand, other, expected):
    assert PokerHand(hand).compare_with(PokerHand(other)) == expected


def test_hand_sorted():
    poker_hands = [PokerHand(hand) for hand in SORTED_HANDS]
    list_copy = poker_hands.copy()
    shuffle(list_copy)
    user_sorted = chain(sorted(list_copy))
    for index, hand in enumerate(user_sorted):
        assert hand == poker_hands[index]


def test_custom_sort_five_high_straight():
    pokerhands = [PokerHand("2D AC 3H 4H 5S"), PokerHand("2S 3H 4H 5S 6C")]
    pokerhands.sort(reverse=True)
    assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"


def test_multiple_calls_five_high_straight():
    pokerhand = PokerHand("2C 4S AS 3D 5C")
    expected = True
    expected_card_values = [5, 4, 3, 2, 14]
    for _ in range(10):
        assert pokerhand._is_five_high_straight() == expected
        assert pokerhand._card_values == expected_card_values


def test_euler_project():
    answer = 0
    script_dir = os.path.abspath(os.path.dirname(__file__))
    poker_hands = os.path.join(script_dir, "poker_hands.txt")
    with open(poker_hands) as file_hand:
        for line in file_hand:
            player_hand = line[:14].strip()
            opponent_hand = line[15:].strip()
            player, opponent = PokerHand(player_hand), PokerHand(opponent_hand)
            output = player.compare_with(opponent)
            if output == "Win":
                answer += 1
    assert answer == 376
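# Note: test_euler_project reads the 1000-hand dataset from Project Euler problem 54
# (poker_hands.txt, two space-separated five-card hands per line); Player 1 wins exactly 376.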
| 659 |
"""simple docstring"""
def snake_case ( _a: int , _a: list[int] , _a: int )-> int:
'''simple docstring'''
def count_of_possible_combinations(_a: int ) -> int:
if target < 0:
return 0
if target == 0:
return 1
return sum(count_of_possible_combinations(target - item ) for item in array )
return count_of_possible_combinations(_a )
def snake_case ( _a: int , _a: list[int] , _a: int )-> int:
'''simple docstring'''
def count_of_possible_combinations_with_dp_array(
_a: int , _a: list[int] ) -> int:
if target < 0:
return 0
if target == 0:
return 1
if dp_array[target] != -1:
return dp_array[target]
lowerCamelCase__ = sum(
count_of_possible_combinations_with_dp_array(target - item , _a )
for item in array )
lowerCamelCase__ = answer
return answer
lowerCamelCase__ = [-1] * (target + 1)
return count_of_possible_combinations_with_dp_array(_a , _a )
def snake_case ( _a: int , _a: list[int] , _a: int )-> int:
'''simple docstring'''
lowerCamelCase__ = [0] * (target + 1)
lowerCamelCase__ = 1
for i in range(1 , target + 1 ):
for j in range(_a ):
if i - array[j] >= 0:
dp_array[i] += dp_array[i - array[j]]
return dp_array[target]
if __name__ == "__main__":
import doctest
doctest.testmod()
    n = 3
    target = 5
    array = [1, 2, 5]
print(combination_sum_iv(n, array, target))
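    # Sanity check: all three implementations count the same 9 ordered combinations of
    # [1, 2, 5] summing to 5 (1+1+1+1+1; 1+1+1+2 in four orders; 1+2+2 in three; 5 itself).
    assert (
        combination_sum_iv(n, array, target)
        == combination_sum_iv_dp_array(n, array, target)
        == combination_sum_iv_bottom_up(n, array, target)
        == 9
    )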
| 659 | 1 |
"""simple docstring"""
def _snake_case ( snake_case__ : str , snake_case__ : str = " " ):
A = []
A = 0
for index, char in enumerate(snake_case__ ):
if char == separator:
split_words.append(string[last_index:index] )
A = index + 1
elif index + 1 == len(snake_case__ ):
split_words.append(string[last_index : index + 1] )
return split_words
if __name__ == "__main__":
from doctest import testmod
testmod() | 91 |
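    # Example with the default separator:
    print(split("Hello there"))  # -> ['Hello', 'there']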
def bfs(graph, s, t, parent) -> bool:
    """Return True if the sink `t` is reachable from `s` in the residual graph."""
    visited = [False] * len(graph)
    queue = []
    queue.append(s)
    visited[s] = True

    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u

    return visited[t]


def ford_fulkerson(graph, source, sink):
    """Compute the maximum flow from source to sink (Edmonds-Karp: BFS augmenting paths)."""
    # This array is filled by BFS and stores the augmenting path
    parent = [-1] * (len(graph))
    max_flow = 0
    while bfs(graph, source, sink, parent):
        path_flow = float("Inf")
        s = sink

        while s != source:
            # Find the minimum residual capacity along the augmenting path
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]

        max_flow += path_flow
        v = sink

        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]

    return max_flow


graph = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]

source, sink = 0, 5
print(ford_fulkerson(graph, source, sink))
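# The sample network above is the classic CLRS max-flow example; the print statement outputs 23.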
| 375 | 0 |
import flax.linen as nn
import jax.numpy as jnp
from .attention_flax import FlaxTransformer2DModel
from .resnet_flax import FlaxDownsample2D, FlaxResnetBlock2D, FlaxUpsample2D
class FlaxCrossAttnDownBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    add_downsample: bool = True
    use_linear_projection: bool = False
    only_cross_attention: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []
        attentions = []

        for i in range(self.num_layers):
            in_channels = self.in_channels if i == 0 else self.out_channels

            res_block = FlaxResnetBlock2D(
                in_channels=in_channels,
                out_channels=self.out_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)

            attn_block = FlaxTransformer2DModel(
                in_channels=self.out_channels,
                n_heads=self.num_attention_heads,
                d_head=self.out_channels // self.num_attention_heads,
                depth=1,
                use_linear_projection=self.use_linear_projection,
                only_cross_attention=self.only_cross_attention,
                use_memory_efficient_attention=self.use_memory_efficient_attention,
                dtype=self.dtype,
            )
            attentions.append(attn_block)

        self.resnets = resnets
        self.attentions = attentions

        if self.add_downsample:
            self.downsamplers_0 = FlaxDownsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, temb, encoder_hidden_states, deterministic=True):
        output_states = ()

        for resnet, attn in zip(self.resnets, self.attentions):
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)
            output_states += (hidden_states,)

        if self.add_downsample:
            hidden_states = self.downsamplers_0(hidden_states)
            output_states += (hidden_states,)

        return hidden_states, output_states


class FlaxDownBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    add_downsample: bool = True
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []

        for i in range(self.num_layers):
            in_channels = self.in_channels if i == 0 else self.out_channels

            res_block = FlaxResnetBlock2D(
                in_channels=in_channels,
                out_channels=self.out_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)
        self.resnets = resnets

        if self.add_downsample:
            self.downsamplers_0 = FlaxDownsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, temb, deterministic=True):
        output_states = ()

        for resnet in self.resnets:
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            output_states += (hidden_states,)

        if self.add_downsample:
            hidden_states = self.downsamplers_0(hidden_states)
            output_states += (hidden_states,)

        return hidden_states, output_states


class FlaxCrossAttnUpBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    prev_output_channel: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    add_upsample: bool = True
    use_linear_projection: bool = False
    only_cross_attention: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []
        attentions = []

        for i in range(self.num_layers):
            res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels
            resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels

            res_block = FlaxResnetBlock2D(
                in_channels=resnet_in_channels + res_skip_channels,
                out_channels=self.out_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)

            attn_block = FlaxTransformer2DModel(
                in_channels=self.out_channels,
                n_heads=self.num_attention_heads,
                d_head=self.out_channels // self.num_attention_heads,
                depth=1,
                use_linear_projection=self.use_linear_projection,
                only_cross_attention=self.only_cross_attention,
                use_memory_efficient_attention=self.use_memory_efficient_attention,
                dtype=self.dtype,
            )
            attentions.append(attn_block)

        self.resnets = resnets
        self.attentions = attentions

        if self.add_upsample:
            self.upsamplers_0 = FlaxUpsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, res_hidden_states_tuple, temb, encoder_hidden_states, deterministic=True):
        for resnet, attn in zip(self.resnets, self.attentions):
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]
            hidden_states = jnp.concatenate((hidden_states, res_hidden_states), axis=-1)

            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)

        if self.add_upsample:
            hidden_states = self.upsamplers_0(hidden_states)

        return hidden_states


class FlaxUpBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    prev_output_channel: int
    dropout: float = 0.0
    num_layers: int = 1
    add_upsample: bool = True
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []

        for i in range(self.num_layers):
            res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels
            resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels

            res_block = FlaxResnetBlock2D(
                in_channels=resnet_in_channels + res_skip_channels,
                out_channels=self.out_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)

        self.resnets = resnets

        if self.add_upsample:
            self.upsamplers_0 = FlaxUpsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, res_hidden_states_tuple, temb, deterministic=True):
        for resnet in self.resnets:
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]
            hidden_states = jnp.concatenate((hidden_states, res_hidden_states), axis=-1)

            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)

        if self.add_upsample:
            hidden_states = self.upsamplers_0(hidden_states)

        return hidden_states


class FlaxUNetMidBlock2DCrossAttn(nn.Module):
    in_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    use_linear_projection: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        # there is always at least one resnet
        resnets = [
            FlaxResnetBlock2D(
                in_channels=self.in_channels,
                out_channels=self.in_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
        ]

        attentions = []

        for _ in range(self.num_layers):
            attn_block = FlaxTransformer2DModel(
                in_channels=self.in_channels,
                n_heads=self.num_attention_heads,
                d_head=self.in_channels // self.num_attention_heads,
                depth=1,
                use_linear_projection=self.use_linear_projection,
                use_memory_efficient_attention=self.use_memory_efficient_attention,
                dtype=self.dtype,
            )
            attentions.append(attn_block)

            res_block = FlaxResnetBlock2D(
                in_channels=self.in_channels,
                out_channels=self.in_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)

        self.resnets = resnets
        self.attentions = attentions

    def __call__(self, hidden_states, temb, encoder_hidden_states, deterministic=True):
        hidden_states = self.resnets[0](hidden_states, temb)
        for attn, resnet in zip(self.attentions, self.resnets[1:]):
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)

        return hidden_states
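# Minimal shape-check sketch (hypothetical sizes; the Flax UNet blocks use NHWC layout and the
# resnet blocks project the time embedding internally, so the temb width below is an assumption):
#   import jax, jax.numpy as jnp
#   block = FlaxDownBlock2D(in_channels=32, out_channels=64)
#   sample, temb = jnp.zeros((1, 32, 32, 32)), jnp.zeros((1, 128))
#   params = block.init(jax.random.PRNGKey(0), sample, temb)
#   hidden_states, skips = block.apply(params, sample, temb)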
| 706 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.activations import gelu_new, gelu_python, get_activation
@require_torch
class TestActivations(unittest.TestCase):
    def test_gelu_versions(self):
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
        torch_builtin = get_activation("gelu")
        self.assertTrue(torch.allclose(gelu_python(x), torch_builtin(x)))
        self.assertFalse(torch.allclose(gelu_python(x), gelu_new(x)))

    def test_gelu_10(self):
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
        torch_builtin = get_activation("gelu")
        gelu10 = get_activation("gelu_10")

        y_gelu = torch_builtin(x)
        y_gelu_10 = gelu10(x)

        clipped_mask = torch.where(y_gelu_10 < 10.0, 1, 0)

        self.assertTrue(torch.max(y_gelu_10).item() == 10.0)
        self.assertTrue(torch.allclose(y_gelu * clipped_mask, y_gelu_10 * clipped_mask))

    def test_get_activation(self):
        get_activation("gelu")
        get_activation("gelu_10")
        get_activation("gelu_fast")
        get_activation("gelu_new")
        get_activation("gelu_python")
        get_activation("gelu_pytorch_tanh")
        get_activation("linear")
        get_activation("mish")
        get_activation("quick_gelu")
        get_activation("relu")
        get_activation("sigmoid")
        get_activation("silu")
        get_activation("swish")
        get_activation("tanh")
        with self.assertRaises(KeyError):
            get_activation("bogus")
        with self.assertRaises(KeyError):
            get_activation(None)

    def test_activations_are_distinct_objects(self):
        act1 = get_activation("gelu")
        act1.a = 1
        act2 = get_activation("gelu")
        self.assertEqual(act1.a, 1)
        with self.assertRaises(AttributeError):
            _ = act2.a
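# Note on the last test: get_activation builds a fresh module instance on every call
# (ACT2FN is a ClassInstantier), so attributes set on one instance never leak to another.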
| 80 | 0 |
"""simple docstring"""
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
pkgs_to_check_at_runtime = [
"""python""",
"""tqdm""",
"""regex""",
"""requests""",
"""packaging""",
"""filelock""",
"""numpy""",
"""tokenizers""",
"""huggingface-hub""",
"""safetensors""",
"""accelerate""",
"""pyyaml""",
]
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
elif pkg == "accelerate":
# must be loaded here, or else tqdm check may fail
from .utils import is_accelerate_available
# Maybe switch to is_torch_available in the future here so that Accelerate is hard dep of
# Transformers with PyTorch
if not is_accelerate_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(f"""can't find {pkg} in {deps.keys()}, check dependency_versions_table.py""")
def dep_version_check(pkg, hint=None):
    require_version(deps[pkg], hint)
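# Example: dep_version_check("tokenizers") re-validates the range pinned in
# dependency_versions_table.py at call time; `hint` is an optional extra error message.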
| 409 |
import argparse
from transformers import BigBirdConfig, BigBirdForPreTraining, BigBirdForQuestionAnswering, load_tf_weights_in_big_bird
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, big_bird_config_file, pytorch_dump_path, is_trivia_qa):
    # Initialise PyTorch model
    config = BigBirdConfig.from_json_file(big_bird_config_file)
    print(f"Building PyTorch model from configuration: {config}")

    if is_trivia_qa:
        model = BigBirdForQuestionAnswering(config)
    else:
        model = BigBirdForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_big_bird(model, tf_checkpoint_path, is_trivia_qa=is_trivia_qa)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--big_bird_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained BERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--is_trivia_qa""", action="""store_true""", help="""Whether to convert a model with a trivia_qa head."""
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.big_bird_config_file, args.pytorch_dump_path, args.is_trivia_qa
)
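# Example invocation (hypothetical paths):
#   python convert_bigbird_original_tf_checkpoint_to_pytorch.py \
#     --tf_checkpoint_path bigbird_roberta_base/model.ckpt \
#     --big_bird_config_file bigbird_roberta_base/config.json \
#     --pytorch_dump_path ./bigbird_pytorch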
| 111 | 0 |
import numpy as np
import torch
from imwatermark import WatermarkEncoder
# Copied from https://github.com/Stability-AI/generative-models/blob/613af104c6b85184091d42d374fef420eddb356d/scripts/demo/streamlit_helpers.py#L66
WATERMARK_MESSAGE = 0b10_11_00_11_11_10_11_00_10_01_00_00_01_11_10_11_10_11_00_01_10_01_11_10
# bin(x)[2:] gives bits of x as str, use int to convert them to 0/1
WATERMARK_BITS = [int(bit) for bit in bin(WATERMARK_MESSAGE)[2:]]


class StableDiffusionXLWatermarker:
    def __init__(self):
        self.watermark = WATERMARK_BITS
        self.encoder = WatermarkEncoder()

        self.encoder.set_watermark("bits", self.watermark)

    def apply_watermark(self, images: torch.FloatTensor):
        # can't encode images that are smaller than 256
        if images.shape[-1] < 256:
            return images

        images = (255 * (images / 2 + 0.5)).cpu().permute(0, 2, 3, 1).float().numpy()

        images = [self.encoder.encode(image, "dwtDct") for image in images]

        images = torch.from_numpy(np.array(images)).permute(0, 3, 1, 2)

        images = torch.clamp(2 * (images / 255 - 0.5), min=-1.0, max=1.0)
        return images
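# Usage sketch (hypothetical tensor; expects NCHW images scaled to [-1, 1], width >= 256):
#   watermarker = StableDiffusionXLWatermarker()
#   images = torch.randn(2, 3, 512, 512).clamp(-1.0, 1.0)
#   watermarked = watermarker.apply_watermark(images)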
| 705 |
import argparse
import re
import torch
from CLAP import create_model
from transformers import AutoFeatureExtractor, ClapConfig, ClapModel
KEYS_TO_MODIFY_MAPPING = {
'''text_branch''': '''text_model''',
'''audio_branch''': '''audio_model.audio_encoder''',
'''attn''': '''attention.self''',
'''self.proj''': '''output.dense''',
'''attention.self_mask''': '''attn_mask''',
'''mlp.fc1''': '''intermediate.dense''',
'''mlp.fc2''': '''output.dense''',
'''norm1''': '''layernorm_before''',
'''norm2''': '''layernorm_after''',
'''bn0''': '''batch_norm''',
}
processor = AutoFeatureExtractor.from_pretrained("laion/clap-htsat-unfused", truncation="rand_trunc")
def init_clap(checkpoint_path, enable_fusion=False):
    model, model_cfg = create_model(
        "HTSAT-tiny",
        "roberta",
        checkpoint_path,
        precision="fp32",
        device="cuda:0" if torch.cuda.is_available() else "cpu",
        enable_fusion=enable_fusion,
        fusion_type="aff_2d" if enable_fusion else None,
    )
    return model, model_cfg
def rename_state_dict(state_dict):
    model_state_dict = {}

    sequential_layers_pattern = r".*sequential.(\d+).*"
    text_projection_pattern = r".*_projection.(\d+).*"

    for key, value in state_dict.items():
        # check if any key needs to be modified
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify, new_key)

        if re.match(sequential_layers_pattern, key):
            # replace sequential layers with list
            sequential_layer = re.match(sequential_layers_pattern, key).group(1)
            key = key.replace(f"sequential.{sequential_layer}.", f"layers.{int(sequential_layer)//3}.linear.")
        elif re.match(text_projection_pattern, key):
            projecton_layer = int(re.match(text_projection_pattern, key).group(1))
            # Because in CLAP they use `nn.Sequential`...
            transformers_projection_layer = 1 if projecton_layer == 0 else 2
            key = key.replace(f"_projection.{projecton_layer}.", f"_projection.linear{transformers_projection_layer}.")

        if "audio" in key and "qkv" in key:
            # split qkv into query, key and value
            mixed_qkv = value
            qkv_dim = mixed_qkv.size(0) // 3

            query_layer = mixed_qkv[:qkv_dim]
            key_layer = mixed_qkv[qkv_dim : qkv_dim * 2]
            value_layer = mixed_qkv[qkv_dim * 2 :]

            model_state_dict[key.replace("qkv", "query")] = query_layer
            model_state_dict[key.replace("qkv", "key")] = key_layer
            model_state_dict[key.replace("qkv", "value")] = value_layer
        else:
            model_state_dict[key] = value

    return model_state_dict
def convert_clap_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path, enable_fusion=False):
    clap_model, clap_model_cfg = init_clap(checkpoint_path, enable_fusion=enable_fusion)

    clap_model.eval()
    state_dict = clap_model.state_dict()
    state_dict = rename_state_dict(state_dict)

    transformers_config = ClapConfig()
    transformers_config.audio_config.enable_fusion = enable_fusion
    model = ClapModel(transformers_config)

    # ignore the spectrogram embedding layer
    model.load_state_dict(state_dict, strict=False)

    model.save_pretrained(pytorch_dump_folder_path)
    transformers_config.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument('''--enable_fusion''', action='''store_true''', help='''Whether to enable fusion or not''')
    args = parser.parse_args()
convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
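# Example invocation (hypothetical paths; --enable_fusion only applies to fused CLAP checkpoints):
#   python convert_clap_original_pytorch_to_hf.py \
#     --checkpoint_path clap_htsat_tiny.pt \
#     --pytorch_dump_folder_path ./clap_hf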
| 149 | 0 |
"""simple docstring"""
import os
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen, xsplitext
from ..table import array_cast
from ..utils.py_utils import no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class SCREAMING_SNAKE_CASE__ :
lowercase__ = None
lowercase__ = True
lowercase__ = True
lowercase__ = None
# Automatically constructed
lowercase__ = "dict"
lowercase__ = pa.struct({"bytes": pa.binary(), "path": pa.string()} )
lowercase__ = field(default="Audio" , init=UpperCamelCase_ , repr=UpperCamelCase_ )
def __call__( self : Dict):
"""simple docstring"""
return self.pa_type
def _UpperCAmelCase ( self : int , lowerCAmelCase_ : Union[str, bytes, dict]):
"""simple docstring"""
try:
import soundfile as sf # soundfile is a dependency of librosa, needed to decode audio files.
except ImportError as err:
raise ImportError("""To support encoding audio data, please install 'soundfile'.""") from err
if isinstance(__A , __A):
return {"bytes": None, "path": value}
elif isinstance(__A , __A):
return {"bytes": value, "path": None}
elif "array" in value:
# convert the audio array to wav bytes
lowercase_ = BytesIO()
sf.write(__A , value["""array"""] , value["""sampling_rate"""] , format="""wav""")
return {"bytes": buffer.getvalue(), "path": None}
elif value.get("""path""") is not None and os.path.isfile(value["""path"""]):
# we set "bytes": None to not duplicate the data if they're already available locally
if value["path"].endswith("""pcm"""):
# "PCM" only has raw audio bytes
if value.get("""sampling_rate""") is None:
# At least, If you want to convert "PCM-byte" to "WAV-byte", you have to know sampling rate
raise KeyError("""To use PCM files, please specify a 'sampling_rate' in Audio object""")
if value.get("""bytes"""):
# If we already had PCM-byte, we don`t have to make "read file, make bytes" (just use it!)
lowercase_ = np.frombuffer(value["""bytes"""] , dtype=np.intaa).astype(np.floataa) / 3_2_7_6_7
else:
lowercase_ = np.memmap(value["""path"""] , dtype="""h""" , mode="""r""").astype(np.floataa) / 3_2_7_6_7
lowercase_ = BytesIO(bytes())
sf.write(__A , __A , value["""sampling_rate"""] , format="""wav""")
return {"bytes": buffer.getvalue(), "path": None}
else:
return {"bytes": None, "path": value.get("""path""")}
elif value.get("""bytes""") is not None or value.get("""path""") is not None:
# store the audio bytes, and path is used to infer the audio format using the file extension
return {"bytes": value.get("""bytes"""), "path": value.get("""path""")}
else:
raise ValueError(
F'''An audio sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.''')
def _UpperCAmelCase ( self : Tuple , lowerCAmelCase_ : dict , lowerCAmelCase_ : Optional[Dict[str, Union[str, bool, None]]] = None):
"""simple docstring"""
if not self.decode:
raise RuntimeError("""Decoding is disabled for this feature. Please use Audio(decode=True) instead.""")
lowercase_ = (value["path"], BytesIO(value["""bytes"""])) if value["bytes"] is not None else (value["path"], None)
if path is None and file is None:
raise ValueError(F'''An audio sample should have one of \'path\' or \'bytes\' but both are None in {value}.''')
try:
import librosa
import soundfile as sf
except ImportError as err:
raise ImportError("""To support decoding audio files, please install 'librosa' and 'soundfile'.""") from err
lowercase_ = xsplitext(__A)[1][1:].lower() if path is not None else None
if not config.IS_OPUS_SUPPORTED and audio_format == "opus":
raise RuntimeError(
"""Decoding 'opus' files requires system library 'libsndfile'>=1.0.31, """
"""You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. """)
elif not config.IS_MP3_SUPPORTED and audio_format == "mp3":
raise RuntimeError(
"""Decoding 'mp3' files requires system library 'libsndfile'>=1.1.0, """
"""You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. """)
if file is None:
lowercase_ = token_per_repo_id or {}
lowercase_ = path.split("""::""")[-1]
try:
lowercase_ = string_to_dict(__A , config.HUB_DATASETS_URL)["repo_id"]
lowercase_ = token_per_repo_id[repo_id]
except (ValueError, KeyError):
lowercase_ = None
with xopen(__A , """rb""" , use_auth_token=__A) as f:
lowercase_ = sf.read(__A)
else:
lowercase_ = sf.read(__A)
lowercase_ = array.T
if self.mono:
lowercase_ = librosa.to_mono(__A)
if self.sampling_rate and self.sampling_rate != sampling_rate:
lowercase_ = librosa.resample(__A , orig_sr=__A , target_sr=self.sampling_rate)
lowercase_ = self.sampling_rate
return {"path": path, "array": array, "sampling_rate": sampling_rate}
def _UpperCAmelCase ( self : Tuple):
"""simple docstring"""
from .features import Value
if self.decode:
raise ValueError("""Cannot flatten a decoded Audio feature.""")
return {
"bytes": Value("""binary"""),
"path": Value("""string"""),
}
def _UpperCAmelCase ( self : Dict , lowerCAmelCase_ : Union[pa.StringArray, pa.StructArray]):
"""simple docstring"""
if pa.types.is_string(storage.type):
lowercase_ = pa.array([None] * len(__A) , type=pa.binary())
lowercase_ = pa.StructArray.from_arrays([bytes_array, storage] , ["""bytes""", """path"""] , mask=storage.is_null())
elif pa.types.is_binary(storage.type):
lowercase_ = pa.array([None] * len(__A) , type=pa.string())
lowercase_ = pa.StructArray.from_arrays([storage, path_array] , ["""bytes""", """path"""] , mask=storage.is_null())
elif pa.types.is_struct(storage.type) and storage.type.get_all_field_indices("""array"""):
lowercase_ = pa.array([Audio().encode_example(__A) if x is not None else None for x in storage.to_pylist()])
elif pa.types.is_struct(storage.type):
if storage.type.get_field_index("""bytes""") >= 0:
lowercase_ = storage.field("""bytes""")
else:
lowercase_ = pa.array([None] * len(__A) , type=pa.binary())
if storage.type.get_field_index("""path""") >= 0:
lowercase_ = storage.field("""path""")
else:
lowercase_ = pa.array([None] * len(__A) , type=pa.string())
lowercase_ = pa.StructArray.from_arrays([bytes_array, path_array] , ["""bytes""", """path"""] , mask=storage.is_null())
return array_cast(__A , self.pa_type)
    def embed_storage(self, storage: pa.StructArray) -> pa.StructArray:
        """Embed the audio bytes into the storage by reading any files the rows still reference."""
        @no_op_if_value_is_null
        def path_to_bytes(path):
            with xopen(path, "rb") as f:
                bytes_ = f.read()
            return bytes_

        bytes_array = pa.array(
            [
                (path_to_bytes(x["path"]) if x["bytes"] is None else x["bytes"]) if x is not None else None
                for x in storage.to_pylist()
            ],
            type=pa.binary(),
        )
        path_array = pa.array(
            [os.path.basename(path) if path is not None else None for path in storage.field("path").to_pylist()],
            type=pa.string(),
        )
        storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null())
        return array_cast(storage, self.pa_type)
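# Illustrative usage of the feature above (a sketch, not part of this module):
#   from datasets import Dataset, Audio
#   ds = Dataset.from_dict({"audio": ["path/to/clip.wav"]}).cast_column("audio", Audio(sampling_rate=16_000))
#   ds[0]["audio"]  # -> {"path": ..., "array": np.ndarray, "sampling_rate": 16000}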
| 567 |
import unittest
from transformers import AutoTokenizer, NystromformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
NystromformerForMaskedLM,
NystromformerForMultipleChoice,
NystromformerForQuestionAnswering,
NystromformerForSequenceClassification,
NystromformerForTokenClassification,
NystromformerModel,
)
from transformers.models.nystromformer.modeling_nystromformer import NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
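# Nystromformer (Xiong et al., 2021) approximates softmax self-attention with the Nystrom
# method, reducing the quadratic cost of self-attention to near-linear in sequence length.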
class NystromformerModelTester:
    def __init__(
        self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True,
        use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4,
        intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
        max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02,
        num_labels=3, num_choices=4, scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return NystromformerConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size,
            is_decoder=False, initializer_range=self.initializer_range,
        )
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = NystromformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = NystromformerForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = NystromformerForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids,
            start_positions=sequence_labels, end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = NystromformerForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = NystromformerForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = NystromformerForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class NystromformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            NystromformerModel,
            NystromformerForMaskedLM,
            NystromformerForMultipleChoice,
            NystromformerForQuestionAnswering,
            NystromformerForSequenceClassification,
            NystromformerForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": NystromformerModel,
            "fill-mask": NystromformerForMaskedLM,
            "question-answering": NystromformerForQuestionAnswering,
            "text-classification": NystromformerForSequenceClassification,
            "token-classification": NystromformerForTokenClassification,
            "zero-shot": NystromformerForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_headmasking = False

    def setUp(self):
        self.model_tester = NystromformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=NystromformerConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = NystromformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class NystromformerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = NystromformerModel.from_pretrained("uw-madison/nystromformer-512")
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])

        with torch.no_grad():
            output = model(input_ids)[0]

        expected_shape = torch.Size((1, 6, 768))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[-0.4532, -0.0936, 0.5137], [-0.2676, 0.0628, 0.6186], [-0.3629, -0.1726, 0.4716]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_masked_lm_end_to_end(self):
        sentence = "the [MASK] of Belgium is Brussels"

        tokenizer = AutoTokenizer.from_pretrained("uw-madison/nystromformer-512")
        model = NystromformerForMaskedLM.from_pretrained("uw-madison/nystromformer-512")

        encoding = tokenizer(sentence, return_tensors="pt")

        with torch.no_grad():
            token_logits = model(encoding.input_ids).logits

        prediction = token_logits[:, 2, :].argmax(-1)[0]

        self.assertEqual(tokenizer.decode(prediction), "capital")
| 297 | 0 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
    DDIMScheduler,
    KandinskyV22ControlnetImg2ImgPipeline,
    KandinskyV22PriorEmb2EmbPipeline,
    UNet2DConditionModel,
    VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyV22ControlnetImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22ControlnetImg2ImgPipeline
    params = ["image_embeds", "negative_image_embeds", "image", "hint"]
    batch_params = ["image_embeds", "negative_image_embeds", "image", "hint"]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "strength",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100
    @property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 8,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image_hint",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNet2DConditionModel(**model_kwargs)
        return model
@property
    def dummy_movq_kwargs(self):
return {
"block_out_channels": [32, 32, 64, 64],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq

        ddim_config = {
            "num_train_timesteps": 1000,
            "beta_schedule": "linear",
            "beta_start": 0.00085,
            "beta_end": 0.012,
            "clip_sample": False,
            "set_alpha_to_one": False,
            "steps_offset": 0,
            "prediction_type": "epsilon",
            "thresholding": False,
        }

        scheduler = DDIMScheduler(**ddim_config)

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))
        # create hint
        hint = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": init_image,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "hint": hint,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 10,
            "guidance_scale": 7.0,
            "strength": 0.2,
            "output_type": "np",
        }
        return inputs
    def test_kandinsky_controlnet_img2img(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(**self.get_dummy_inputs(device), return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.54985034, 0.55509365, 0.52561504, 0.5570494, 0.5593818, 0.5263979, 0.50285643, 0.5069846, 0.51196736]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class KandinskyV22ControlnetImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_kandinsky_controlnet_img2img(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy"
        )
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        init_image = init_image.resize((512, 512))

        hint = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/hint_image_cat.png"
        )
        hint = torch.from_numpy(np.array(hint)).float() / 255.0
        hint = hint.permute(2, 0, 1).unsqueeze(0)

        prompt = "A robot, 4k photo"

        pipe_prior = KandinskyV22PriorEmb2EmbPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyV22ControlnetImg2ImgPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)

        image_emb, zero_image_emb = pipe_prior(
            prompt, image=init_image, strength=0.85, generator=generator, negative_prompt=""
        ).to_tuple()

        output = pipeline(
            image=init_image, image_embeds=image_emb, negative_image_embeds=zero_image_emb, hint=hint,
            generator=generator, num_inference_steps=100, height=512, width=512, strength=0.5, output_type="np",
        )

        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert_mean_pixel_difference(image, expected_image)
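# Note: the "hint" above is the ControlNet conditioning signal -- a depth image scaled to
# [0, 1] and shaped (1, 3, H, W) -- consumed alongside the prior's image embeddings.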
| 706 |
"""Project Euler problem 30: digit fifth powers (https://projecteuler.net/problem=30)."""
DIGITS_FIFTH_POWER = {str(digit): digit**5 for digit in range(10)}


def digits_fifth_powers_sum(number: int) -> int:
    """Return the sum of the fifth powers of the decimal digits of ``number``."""
    return sum(DIGITS_FIFTH_POWER[digit] for digit in str(number))
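# e.g. digits_fifth_powers_sum(4150) == 4**5 + 1**5 + 5**5 + 0**5 == 4150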
def solution() -> int:
    """Sum every multi-digit number that equals the sum of the fifth powers of its digits."""
    # upper bound: 6 * 9**5 == 354294, so no candidate has seven or more digits
    return sum(
        number
        for number in range(1_000, 1_000_000)
        if number == digits_fifth_powers_sum(number)
    )
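# the qualifying numbers are 4150, 4151, 54748, 92727, 93084 and 194979, so solution() == 443839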
if __name__ == "__main__":
print(solution())
| 646 | 0 |
import unittest
import numpy as np
from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
expand_dims,
flatten_dict,
is_flax_available,
is_tf_available,
is_torch_available,
reshape,
squeeze,
transpose,
)
if is_flax_available():
import jax.numpy as jnp
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
class GenericTester(unittest.TestCase):
    def test_flatten_dict(self):
        input_dict = {
            "task_specific_params": {
                "summarization": {"length_penalty": 1.0, "max_length": 128, "min_length": 12, "num_beams": 4},
                "summarization_cnn": {"length_penalty": 2.0, "max_length": 142, "min_length": 56, "num_beams": 4},
                "summarization_xsum": {"length_penalty": 1.0, "max_length": 62, "min_length": 11, "num_beams": 6},
            }
        }
        expected_dict = {
            "task_specific_params.summarization.length_penalty": 1.0,
            "task_specific_params.summarization.max_length": 128,
            "task_specific_params.summarization.min_length": 12,
            "task_specific_params.summarization.num_beams": 4,
            "task_specific_params.summarization_cnn.length_penalty": 2.0,
            "task_specific_params.summarization_cnn.max_length": 142,
            "task_specific_params.summarization_cnn.min_length": 56,
            "task_specific_params.summarization_cnn.num_beams": 4,
            "task_specific_params.summarization_xsum.length_penalty": 1.0,
            "task_specific_params.summarization_xsum.max_length": 62,
            "task_specific_params.summarization_xsum.min_length": 11,
            "task_specific_params.summarization_xsum.num_beams": 6,
        }

        self.assertEqual(flatten_dict(input_dict), expected_dict)

    def test_transpose_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(transpose(x), x.transpose()))

        x = np.random.randn(3, 4, 5)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), x.transpose((1, 2, 0))))

    @require_torch
    def test_transpose_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(transpose(x), transpose(t).numpy()))

        x = np.random.randn(3, 4, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), transpose(t, axes=(1, 2, 0)).numpy()))

    @require_tf
    def test_transpose_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(transpose(x), transpose(t).numpy()))

        x = np.random.randn(3, 4, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), transpose(t, axes=(1, 2, 0)).numpy()))

    @require_flax
    def test_transpose_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(transpose(x), np.asarray(transpose(t))))

        x = np.random.randn(3, 4, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), np.asarray(transpose(t, axes=(1, 2, 0)))))

    def test_reshape_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), np.reshape(x, (4, 3))))

        x = np.random.randn(3, 4, 5)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), np.reshape(x, (12, 5))))

    @require_torch
    def test_reshape_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), reshape(t, (4, 3)).numpy()))

        x = np.random.randn(3, 4, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), reshape(t, (12, 5)).numpy()))

    @require_tf
    def test_reshape_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), reshape(t, (4, 3)).numpy()))

        x = np.random.randn(3, 4, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), reshape(t, (12, 5)).numpy()))

    @require_flax
    def test_reshape_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), np.asarray(reshape(t, (4, 3)))))

        x = np.random.randn(3, 4, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), np.asarray(reshape(t, (12, 5)))))

    def test_squeeze_numpy(self):
        x = np.random.randn(1, 3, 4)
        self.assertTrue(np.allclose(squeeze(x), np.squeeze(x)))

        x = np.random.randn(1, 4, 1, 5)
        self.assertTrue(np.allclose(squeeze(x, axis=2), np.squeeze(x, axis=2)))

    @require_torch
    def test_squeeze_torch(self):
        x = np.random.randn(1, 3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(squeeze(x), squeeze(t).numpy()))

        x = np.random.randn(1, 4, 1, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), squeeze(t, axis=2).numpy()))

    @require_tf
    def test_squeeze_tf(self):
        x = np.random.randn(1, 3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(squeeze(x), squeeze(t).numpy()))

        x = np.random.randn(1, 4, 1, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), squeeze(t, axis=2).numpy()))

    @require_flax
    def test_squeeze_flax(self):
        x = np.random.randn(1, 3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(squeeze(x), np.asarray(squeeze(t))))

        x = np.random.randn(1, 4, 1, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), np.asarray(squeeze(t, axis=2))))

    def test_expand_dims_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), np.expand_dims(x, axis=1)))

    @require_torch
    def test_expand_dims_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), expand_dims(t, axis=1).numpy()))

    @require_tf
    def test_expand_dims_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), expand_dims(t, axis=1).numpy()))

    @require_flax
    def test_expand_dims_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), np.asarray(expand_dims(t, axis=1))))
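# Each framework-specific test above checks the dispatching helper against numpy:
# e.g. transpose(torch.tensor(x)).numpy() must match transpose(x) element-wise.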
| 202 |
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
FILE_CONTENT = """\
Text data.
Second line of data."""

FILE_PATH = "file"
@pytest.fixture(scope="session")
def zstd_path(tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / (FILE_PATH + ".zstd")
    data = bytes(FILE_CONTENT, "utf-8")
    with zstd.open(path, "wb") as f:
        f.write(data)
    return path


@pytest.fixture
def tmpfs_file(tmpfs):
    with open(os.path.join(tmpfs.local_root_dir, FILE_PATH), "w") as f:
        f.write(FILE_CONTENT)
    return FILE_PATH


@pytest.mark.parametrize("compression_format", ["gzip", "xz", "zstd"])
def test_cached_path_extract(compression_format, gz_file, xz_file, zstd_path, tmp_path, text_file):
    input_paths = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_path}
    input_path = input_paths[compression_format]
    cache_dir = tmp_path / "cache"
    download_config = DownloadConfig(cache_dir=cache_dir, extract_compressed_file=True)
    extracted_path = cached_path(input_path, download_config=download_config)
    with open(extracted_path) as f:
        extracted_file_content = f.read()
    with open(text_file) as f:
        expected_file_content = f.read()
    assert extracted_file_content == expected_file_content


@pytest.mark.parametrize("default_extracted", [True, False])
@pytest.mark.parametrize("default_cache_dir", [True, False])
def test_extracted_datasets_path(default_extracted, default_cache_dir, xz_file, tmp_path, monkeypatch):
    custom_cache_dir = "custom_cache"
    custom_extracted_dir = "custom_extracted_dir"
    custom_extracted_path = tmp_path / "custom_extracted_path"
    if default_extracted:
        expected = ("downloads" if default_cache_dir else custom_cache_dir, "extracted")
    else:
        monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_DIR", custom_extracted_dir)
        monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH", str(custom_extracted_path))
        expected = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)

    filename = xz_file
    download_config = (
        DownloadConfig(extract_compressed_file=True)
        if default_cache_dir
        else DownloadConfig(cache_dir=tmp_path / custom_cache_dir, extract_compressed_file=True)
    )
    extracted_file_path = cached_path(filename, download_config=download_config)
    assert Path(extracted_file_path).parent.parts[-2:] == expected


def test_cached_path_local(text_file):
    # absolute path
    text_file = str(Path(text_file).resolve())
    assert cached_path(text_file) == text_file
    # relative path
    text_file = str(Path(text_file).resolve().relative_to(Path(os.getcwd())))
    assert cached_path(text_file) == text_file


def test_cached_path_missing_local(tmp_path):
    # absolute path
    missing_file = str(tmp_path.resolve() / "__missing_file__.txt")
    with pytest.raises(FileNotFoundError):
        cached_path(missing_file)
    # relative path
    missing_file = "./__missing_file__.txt"
    with pytest.raises(FileNotFoundError):
        cached_path(missing_file)


def test_get_from_cache_fsspec(tmpfs_file):
    output_file = get_from_cache(f"tmp://{tmpfs_file}")
    with open(output_file) as f:
        output_file_content = f.read()
    assert output_file_content == FILE_CONTENT


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_cached_path_offline():
    with pytest.raises(OfflineModeIsEnabled):
        cached_path("https://huggingface.co")


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_http_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        http_get("https://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        http_head("https://huggingface.co")


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_ftp_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        ftp_get("ftp://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        ftp_head("ftp://huggingface.co")


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_fsspec_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        fsspec_get("s3://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        fsspec_head("s3://huggingface.co")
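# With HF_DATASETS_OFFLINE patched to True, every remote access path (http, ftp, fsspec)
# must raise OfflineModeIsEnabled before any network call, which the tests above assert.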
| 202 | 1 |
def solution(pence: int = 200) -> int:
    """Project Euler problem 31: count the ways to make `pence` using standard UK coins."""
    coins = [1, 2, 5, 10, 20, 50, 100, 200]
    number_of_ways = [0] * (pence + 1)
    number_of_ways[0] = 1  # base case: 1 way to make 0 pence (use no coins)

    for coin in coins:
        # unbounded-knapsack sweep: each coin may be reused any number of times
        for i in range(coin, pence + 1, 1):
            number_of_ways[i] += number_of_ways[i - coin]
    return number_of_ways[pence]
if __name__ == "__main__":
assert solution(200) == 73_682
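    # hand-checkable small case: 5 pence -> {5}, {2+2+1}, {2+1+1+1}, {1+1+1+1+1}
    assert solution(5) == 4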
| 181 |
import functools
import gc
import inspect
import torch
from .imports import is_npu_available, is_xpu_available
def release_memory(*objects):
    """Set each object to None and clear the accelerator cache."""
    if not isinstance(objects, list):
        objects = list(objects)
    for i in range(len(objects)):
        objects[i] = None
    gc.collect()
    if is_xpu_available():
        torch.xpu.empty_cache()
    elif is_npu_available():
        torch.npu.empty_cache()
    else:
        torch.cuda.empty_cache()
    return objects


def should_reduce_batch_size(exception: Exception) -> bool:
    """Check whether `exception` is one of the out-of-memory errors worth retrying with a smaller batch."""
    _statements = [
        "CUDA out of memory.",  # CUDA OOM
        "cuDNN error: CUDNN_STATUS_NOT_SUPPORTED.",  # CUDNN SNAFU
        "DefaultCPUAllocator: can't allocate memory",  # CPU OOM
    ]
    if isinstance(exception, RuntimeError) and len(exception.args) == 1:
        return any(err in exception.args[0] for err in _statements)
    return False


def find_executable_batch_size(function: callable = None, starting_batch_size: int = 128):
    """Decorator that retries `function` with a halved batch size whenever an out-of-memory error is raised."""
    if function is None:
        return functools.partial(find_executable_batch_size, starting_batch_size=starting_batch_size)

    batch_size = starting_batch_size

    def decorator(*args, **kwargs):
        nonlocal batch_size
        gc.collect()
        if is_xpu_available():
            torch.xpu.empty_cache()
        elif is_npu_available():
            torch.npu.empty_cache()
        else:
            torch.cuda.empty_cache()
        params = list(inspect.signature(function).parameters.keys())
        # Guard against user error
        if len(params) < (len(args) + 1):
            arg_str = ", ".join([f"{arg}={value}" for arg, value in zip(params[1:], args[1:])])
            raise TypeError(
                f"Batch size was passed into `{function.__name__}` as the first argument when called."
                f"Remove this as the decorator already does so: `{function.__name__}({arg_str})`"
            )
        while True:
            if batch_size == 0:
                raise RuntimeError("No executable batch size found, reached zero.")
            try:
                return function(batch_size, *args, **kwargs)
            except Exception as e:
                if should_reduce_batch_size(e):
                    gc.collect()
                    if is_xpu_available():
                        torch.xpu.empty_cache()
                    elif is_npu_available():
                        torch.npu.empty_cache()
                    else:
                        torch.cuda.empty_cache()
                    batch_size //= 2
                else:
                    raise

    return decorator
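# Minimal usage sketch (illustrative only; `train` and its body are hypothetical):
#
#   @find_executable_batch_size(starting_batch_size=128)
#   def train(batch_size):
#       ...  # an OOM raised in here makes the decorator retry with batch_size // 2
#
#   train()  # batch_size is injected by the decorator, not passed by the caller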
| 181 | 1 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModel,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImg2ImgPipeline, UNet2DConditionModel
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
load_image,
load_numpy,
require_torch_gpu,
skip_mps,
slow,
torch_device,
)
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class StableUnCLIPImg2ImgPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableUnCLIPImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])
    def get_dummy_components(self):
        embedder_hidden_size = 32
        embedder_projection_dim = embedder_hidden_size

        # image encoding components
        feature_extractor = CLIPImageProcessor(crop_size=32, size=32)

        torch.manual_seed(0)
        image_encoder = CLIPVisionModelWithProjection(
            CLIPVisionConfig(
                hidden_size=embedder_hidden_size, projection_dim=embedder_projection_dim, num_hidden_layers=5,
                num_attention_heads=4, image_size=32, intermediate_size=37, patch_size=1,
            )
        )

        # image noising components
        torch.manual_seed(0)
        image_normalizer = StableUnCLIPImageNormalizer(embedding_dim=embedder_hidden_size)
        image_noising_scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2")

        # regular denoising components
        torch.manual_seed(0)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        torch.manual_seed(0)
        text_encoder = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0, eos_token_id=2, hidden_size=embedder_hidden_size, projection_dim=32,
                intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5,
                pad_token_id=1, vocab_size=1000,
            )
        )

        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32, in_channels=4, out_channels=4,
            down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"), up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"),
            block_out_channels=(32, 64), attention_head_dim=(2, 4), class_embed_type="projection",
            projection_class_embeddings_input_dim=embedder_projection_dim * 2,
            cross_attention_dim=embedder_hidden_size, layers_per_block=1, upcast_attention=True,
            use_linear_projection=True,
        )

        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_schedule="scaled_linear", beta_start=0.00085, beta_end=0.012,
            prediction_type="v_prediction", set_alpha_to_one=False, steps_offset=1,
        )

        torch.manual_seed(0)
        vae = AutoencoderKL()

        components = {
            # image encoding components
            "feature_extractor": feature_extractor,
            "image_encoder": image_encoder.eval(),
            # image noising components
            "image_normalizer": image_normalizer.eval(),
            "image_noising_scheduler": image_noising_scheduler,
            # regular denoising components
            "tokenizer": tokenizer,
            "text_encoder": text_encoder.eval(),
            "unet": unet.eval(),
            "scheduler": scheduler,
            "vae": vae.eval(),
        }
        return components
    def get_dummy_inputs(self, device, seed=0, pil_image=True):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        input_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        if pil_image:
            input_image = input_image * 0.5 + 0.5
            input_image = input_image.clamp(0, 1)
            input_image = input_image.cpu().permute(0, 2, 3, 1).float().numpy()
            input_image = DiffusionPipeline.numpy_to_pil(input_image)[0]

        return {
            "prompt": "An anime racoon running a marathon",
            "image": input_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "np",
        }
    @skip_mps
    def test_image_embeds_none(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableUnCLIPImg2ImgPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs.update({"image_embeds": None})
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.3872, 0.7224, 0.5601, 0.4741, 0.6872, 0.5814, 0.4636, 0.3867, 0.5078])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device in ["cpu", "mps"]
        self._test_attention_slicing_forward_pass(test_max_difference=test_max_difference)

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device in ["cpu", "mps"]
        self._test_inference_batch_single_identical(test_max_difference=test_max_difference)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_max_difference=False)
@slow
@require_torch_gpu
class StableUnCLIPImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_stable_unclip_l_img2img(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png"
        )

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy"
        )

        pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
            "fusing/stable-unclip-2-1-l-img2img", torch_dtype=torch.float16
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe(input_image, "anime turtle", generator=generator, output_type="np")

        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)
    def test_stable_unclip_h_img2img(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png"
        )

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy"
        )

        pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
            "fusing/stable-unclip-2-1-h-img2img", torch_dtype=torch.float16
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe(input_image, "anime turtle", generator=generator, output_type="np")

        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)
    def test_stable_unclip_img2img_pipeline_with_sequential_cpu_offloading(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png"
        )

        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
            "fusing/stable-unclip-2-1-h-img2img", torch_dtype=torch.float16
        )
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        _ = pipe(input_image, "anime turtle", num_inference_steps=2, output_type="np")

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
| 622 |
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class CTRLTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CTRLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ["adapt", "re@@", "a@@", "apt", "c@@", "t", "<unk>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "a p", "ap t</w>", "r e", "a d", "ad apt</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CTRLTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "adapt react readapt apt"
        output_text = "adapt react readapt apt"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = CTRLTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "adapt react readapt apt"
        bpe_tokens = "adapt re@@ a@@ c@@ t re@@ adapt apt".split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]

        input_bpe_tokens = [0, 1, 2, 4, 5, 1, 0, 3, 6]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
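# Reference mapping for the toy vocab above: adapt=0, re@@=1, a@@=2, apt=3, c@@=4, t=5, <unk>=6,
# so "adapt re@@ a@@ c@@ t re@@ adapt apt" plus <unk> encodes to [0, 1, 2, 4, 5, 1, 0, 3, 6].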
| 622 | 1 |
def naive_cut_rod_recursive(n: int, prices: list):
    """Exponential-time recursion: try every first-cut length i and recurse on the remainder."""
    _enforce_args(n, prices)
    if n == 0:
        return 0
    max_revue = float("-inf")
    for i in range(1, n + 1):
        max_revue = max(max_revue, prices[i - 1] + naive_cut_rod_recursive(n - i, prices))

    return max_revue


def top_down_cut_rod(n: int, prices: list):
    """Memoized (top-down) dynamic programming entry point."""
    _enforce_args(n, prices)
    max_rev = [float("-inf") for _ in range(n + 1)]
    return _top_down_cut_rod_recursive(n, prices, max_rev)


def _top_down_cut_rod_recursive(n: int, prices: list, max_rev: list):
    if max_rev[n] >= 0:
        return max_rev[n]
    elif n == 0:
        return 0
    else:
        max_revenue = float("-inf")
        for i in range(1, n + 1):
            max_revenue = max(
                max_revenue,
                prices[i - 1] + _top_down_cut_rod_recursive(n - i, prices, max_rev),
            )
        max_rev[n] = max_revenue

    return max_rev[n]


def bottom_up_cut_rod(n: int, prices: list):
    """Iterative (bottom-up) dynamic programming solution."""
    _enforce_args(n, prices)

    # length(max_rev) = n + 1, to accommodate for the revenue obtainable from a rod of
    # length 0.
    max_rev = [float("-inf") for _ in range(n + 1)]
    max_rev[0] = 0

    for i in range(1, n + 1):
        max_revenue_i = max_rev[i]
        for j in range(1, i + 1):
            max_revenue_i = max(max_revenue_i, prices[j - 1] + max_rev[i - j])

        max_rev[i] = max_revenue_i

    return max_rev[n]
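# bottom_up_cut_rod fills max_rev[0..n] once with a double loop: O(n^2) time and O(n)
# extra space, versus the exponential blow-up of the naive recursion above.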
def _enforce_args(n: int, prices: list):
    if n < 0:
        msg = f"n must be greater than or equal to 0. Got n = {n}"
        raise ValueError(msg)

    if n > len(prices):
        msg = (
            "Each integral piece of rod must have a corresponding price. "
            f"Got n = {n} but length of prices = {len(prices)}"
        )
        raise ValueError(msg)


def main():
    prices = [6, 10, 12, 15, 20, 23]
    n = len(prices)
    # the best revenue comes from cutting the rod into 6 pieces, each
    # of length 1 resulting in a revenue of 6 * 6 = 36.
    expected_max_revenue = 36

    max_rev_top_down = top_down_cut_rod(n, prices)
    max_rev_bottom_up = bottom_up_cut_rod(n, prices)
    max_rev_naive = naive_cut_rod_recursive(n, prices)

    assert expected_max_revenue == max_rev_top_down
    assert max_rev_top_down == max_rev_bottom_up
    assert max_rev_bottom_up == max_rev_naive
if __name__ == "__main__":
main()
| 618 |
| 618 | 1 |
'''simple docstring'''
from typing import Dict, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import flip_channel_order, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
logger = logging.get_logger(__name__)
def normalize_box(box, width, height):
    # LayoutLM convention: scale box coordinates to a 0-1000 range relative to the image size
    return [
        int(1000 * (box[0] / width)),
        int(1000 * (box[1] / height)),
        int(1000 * (box[2] / width)),
        int(1000 * (box[3] / height)),
    ]
def apply_tesseract(image: np.ndarray, lang: Optional[str], tesseract_config: Optional[str] = None):
    """Applies Tesseract OCR on a document image, and returns recognized words + normalized bounding boxes."""
    tesseract_config = tesseract_config if tesseract_config is not None else ""

    # apply OCR
    pil_image = to_pil_image(image)
    image_width, image_height = pil_image.size
    data = pytesseract.image_to_data(pil_image, lang=lang, output_type="dict", config=tesseract_config)
    words, left, top, width, height = data["text"], data["left"], data["top"], data["width"], data["height"]

    # filter empty words and corresponding coordinates
    irrelevant_indices = [idx for idx, word in enumerate(words) if not word.strip()]
    words = [word for idx, word in enumerate(words) if idx not in irrelevant_indices]
    left = [coord for idx, coord in enumerate(left) if idx not in irrelevant_indices]
    top = [coord for idx, coord in enumerate(top) if idx not in irrelevant_indices]
    width = [coord for idx, coord in enumerate(width) if idx not in irrelevant_indices]
    height = [coord for idx, coord in enumerate(height) if idx not in irrelevant_indices]

    # turn coordinates into (left, top, left+width, top+height) format
    actual_boxes = []
    for x, y, w, h in zip(left, top, width, height):
        actual_box = [x, y, x + w, y + h]
        actual_boxes.append(actual_box)

    # finally, normalize the bounding boxes
    normalized_boxes = []
    for box in actual_boxes:
        normalized_boxes.append(normalize_box(box, image_width, image_height))

    assert len(words) == len(normalized_boxes), "Not as many words as there are bounding boxes"

    return words, normalized_boxes
class LayoutLMv2ImageProcessor(BaseImageProcessor):
    r"""Constructs a LayoutLMv2 image processor (resize + optional Tesseract OCR)."""

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        apply_ocr: bool = True,
        ocr_lang: Optional[str] = None,
        tesseract_config: Optional[str] = "",
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 224, "width": 224}
        size = get_size_dict(size)

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.apply_ocr = apply_ocr
        self.ocr_lang = ocr_lang
        self.tesseract_config = tesseract_config

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize an image to `(size["height"], size["width"])`."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
        output_size = (size["height"], size["width"])
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        apply_ocr: bool = None,
        ocr_lang: Optional[str] = None,
        tesseract_config: Optional[str] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ):
        """Preprocess an image or batch of images for LayoutLMv2-style models."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        apply_ocr = apply_ocr if apply_ocr is not None else self.apply_ocr
        ocr_lang = ocr_lang if ocr_lang is not None else self.ocr_lang
        tesseract_config = tesseract_config if tesseract_config is not None else self.tesseract_config

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if apply_ocr:
            requires_backends(self, "pytesseract")
            words_batch = []
            boxes_batch = []
            for image in images:
                words, boxes = apply_tesseract(image, ocr_lang, tesseract_config)
                words_batch.append(words)
                boxes_batch.append(boxes)

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        # flip color channels from RGB to BGR (as Detectron2 requires this)
        images = [flip_channel_order(image) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors)

        if apply_ocr:
            data["words"] = words_batch
            data["boxes"] = boxes_batch
        return data
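
# --- Illustrative usage (an added sketch; not part of the original module) ---
# Assumes Pillow and pytesseract are installed; "document.png" is a hypothetical file.
if __name__ == "__main__":
    if is_vision_available() and is_pytesseract_available():
        sample = PIL.Image.open("document.png").convert("RGB")  # hypothetical input
        processor = LayoutLMv2ImageProcessor(apply_ocr=True)
        encoding = processor(sample, return_tensors="np")
        # pixel_values are resized to 224x224 and flipped to BGR for Detectron2;
        # "words"/"boxes" hold the per-image OCR output.
        print(encoding["pixel_values"].shape, encoding["words"][0][:5])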
| 48 |
def speed_of_sound_in_a_fluid(density: float, bulk_modulus: float) -> float:
    """
    Calculates the speed of sound in a fluid from its density and bulk modulus:
    c = sqrt(K / rho)
    """
    if density <= 0:
        raise ValueError("Impossible fluid density")
    if bulk_modulus <= 0:
        raise ValueError("Impossible bulk modulus")

    return (bulk_modulus / density) ** 0.5
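
# Worked example (added comment; the physical constants are approximate):
# water at 20 °C has bulk_modulus ≈ 2.15e9 Pa and density ≈ 998 kg/m^3, so
# speed_of_sound_in_a_fluid(density=998, bulk_modulus=2.15e9)
# returns sqrt(2.15e9 / 998) ≈ 1.47e3 m/s, close to the measured ~1482 m/s.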
if __name__ == "__main__":
import doctest
doctest.testmod()
| 515 | 0 |
"""simple docstring"""
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
"""The `inpainting.py` script is outdated. Please use directly `from diffusers import"""
""" StableDiffusionInpaintPipeline` instead."""
)
| 715 |
"""simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class ScoreSdeVePipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = ScoreSdeVeScheduler()

        sde_ve = ScoreSdeVePipeline(unet=unet, scheduler=scheduler)
        sde_ve.to(torch_device)
        sde_ve.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = sde_ve(num_inference_steps=2, output_type="numpy", generator=generator).images

        generator = torch.manual_seed(0)
        image_from_tuple = sde_ve(num_inference_steps=2, output_type="numpy", generator=generator, return_dict=False)[
            0
        ]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2


@slow
@require_torch
class ScoreSdeVePipelineIntegrationTests(unittest.TestCase):
    def test_inference(self):
        model_id = "google/ncsnpp-church-256"
        model = UNet2DModel.from_pretrained(model_id)

        scheduler = ScoreSdeVeScheduler.from_pretrained(model_id)

        sde_ve = ScoreSdeVePipeline(unet=model, scheduler=scheduler)
        sde_ve.to(torch_device)
        sde_ve.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = sde_ve(num_inference_steps=10, output_type="numpy", generator=generator).images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 176 | 0 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Iterable, Iterator
from dataclasses import dataclass
test_data_odd = (3, 9, -11, 0, 7, 5, 1, -1)
test_data_even = (4, 6, 2, 0, 8, 10, 3, -2)


@dataclass
class Node:
    data: int
    next_node: Node | None


class SortedLinkedList:
    def __init__(self, ints: Iterable[int]) -> None:
        self.head: Node | None = None
        for i in sorted(ints, reverse=True):
            self.head = Node(i, self.head)

    def __iter__(self) -> Iterator[int]:
        node = self.head
        while node:
            yield node.data
            node = node.next_node

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __str__(self) -> str:
        return " -> ".join([str(node) for node in self])


def merge_lists(sll_one: SortedLinkedList, sll_two: SortedLinkedList) -> SortedLinkedList:
    """Merge two sorted linked lists into a single sorted linked list."""
    return SortedLinkedList(list(sll_one) + list(sll_two))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    SSL = SortedLinkedList
    print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
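
# Added note on the design: merge_lists re-sorts the concatenation of both lists,
# which costs O((m + n) log(m + n)); since both inputs are already sorted, a
# two-pointer merge could do it in O(m + n). The re-sort is kept for brevity.
# Example: merge_lists(SSL((1, 3)), SSL((2, 4))) prints as "1 -> 2 -> 3 -> 4".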
| 510 |
"""simple docstring"""
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_torch_available():
import torch
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
logger = logging.get_logger(__name__)
@dataclass
class PyTorchBenchmarkArguments(BenchmarkArguments):
    deprecated_args = [
        "no_inference",
        "no_cuda",
        "no_tpu",
        "no_speed",
        "no_memory",
        "no_env_print",
        "no_multi_process",
    ]

    def __init__(self, **kwargs):
        """
        This __init__ exists for legacy code; it translates the deprecated `no_*` flags
        into their positive counterparts before calling the parent constructor.
        """
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                setattr(self, positive_arg, not kwargs.pop(deprecated_arg))
                logger.warning(
                    f"{deprecated_arg} is depreciated. Please use --no_{positive_arg} or"
                    f" {positive_arg}={kwargs[positive_arg]}"
                )

        self.torchscript = kwargs.pop("torchscript", self.torchscript)
        self.torch_xla_tpu_print_metrics = kwargs.pop("torch_xla_tpu_print_metrics", self.torch_xla_tpu_print_metrics)
        self.fp16_opt_level = kwargs.pop("fp16_opt_level", self.fp16_opt_level)
        super().__init__(**kwargs)

    torchscript: bool = field(default=False, metadata={"help": "Trace the models using torchscript"})
    torch_xla_tpu_print_metrics: bool = field(default=False, metadata={"help": "Print Xla/PyTorch tpu metrics"})
    fp16_opt_level: str = field(
        default="O1",
        metadata={
            "help": (
                "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. "
                "See details at https://nvidia.github.io/apex/amp.html"
            )
        },
    )

    @cached_property
    def _setup_devices(self) -> Tuple["torch.device", int]:
        requires_backends(self, ["torch"])
        logger.info("PyTorch: setting up devices")
        if not self.cuda:
            device = torch.device("cpu")
            n_gpu = 0
        elif is_torch_tpu_available():
            device = xm.xla_device()
            n_gpu = 0
        else:
            device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
            n_gpu = torch.cuda.device_count()
        return device, n_gpu

    @property
    def is_tpu(self):
        return is_torch_tpu_available() and self.tpu

    @property
    def device_idx(self) -> int:
        requires_backends(self, ["torch"])
        # TODO(PVP): currently only single GPU is supported
        return torch.cuda.current_device()

    @property
    def device(self) -> "torch.device":
        requires_backends(self, ["torch"])
        return self._setup_devices[0]

    @property
    def n_gpu(self):
        requires_backends(self, ["torch"])
        return self._setup_devices[1]

    @property
    def is_gpu(self):
        return self.n_gpu > 0
| 264 | 0 |
'''simple docstring'''
from __future__ import annotations
from typing import Dict
from ...configuration_utils import PretrainedConfig
ERNIE_M_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''susnato/ernie-m-base_pytorch''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json''',
'''susnato/ernie-m-large_pytorch''': '''https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json''',
}
class ErnieMConfig(PretrainedConfig):
    r"""Configuration class to store the configuration of an ErnieM model."""

    model_type = "ernie_m"
    attribute_map: Dict[str, str] = {"dropout": "classifier_dropout", "num_classes": "num_labels"}

    def __init__(
        self,
        vocab_size: int = 250002,
        hidden_size: int = 768,
        num_hidden_layers: int = 12,
        num_attention_heads: int = 12,
        intermediate_size: int = 3072,
        hidden_act: str = "gelu",
        hidden_dropout_prob: float = 0.1,
        attention_probs_dropout_prob: float = 0.1,
        max_position_embeddings: int = 514,
        initializer_range: float = 0.02,
        pad_token_id: int = 1,
        layer_norm_eps: float = 1e-05,
        classifier_dropout=None,
        is_decoder=False,
        act_dropout=0.0,
        **kwargs,
    ) -> None:
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.is_decoder = is_decoder
        self.act_dropout = act_dropout
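
# --- Illustrative usage (an added sketch, not in the original file) ---
# config = ErnieMConfig(num_hidden_layers=6)   # override any default
# config.dropout            # reads classifier_dropout via attribute_map
# config.num_classes = 3    # routed to num_labels via attribute_map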
| 160 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_deformable_detr import DeformableDetrImageProcessor
logger = logging.get_logger(__name__)
class DeformableDetrFeatureExtractor(DeformableDetrImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DeformableDetrFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use DeformableDetrImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 160 | 1 |
import unittest
import torch
from torch import nn
from diffusers.models.activations import get_activation
class ActivationsTests(unittest.TestCase):
    def test_swish(self):
        act = get_activation("swish")

        self.assertIsInstance(act, nn.SiLU)

        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_silu(self):
        act = get_activation("silu")

        self.assertIsInstance(act, nn.SiLU)

        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_mish(self):
        act = get_activation("mish")

        self.assertIsInstance(act, nn.Mish)

        self.assertEqual(act(torch.tensor(-200, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_gelu(self):
        act = get_activation("gelu")

        self.assertIsInstance(act, nn.GELU)

        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)
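
# Quick reference (added comment): get_activation maps a config string to the
# matching torch.nn module, e.g.
#   act = get_activation("gelu")   # -> nn.GELU()
#   y = act(torch.randn(4))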
| 382 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
"configuration_vision_text_dual_encoder": ["VisionTextDualEncoderConfig"],
"processing_vision_text_dual_encoder": ["VisionTextDualEncoderProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ = ["VisionTextDualEncoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ = ["FlaxVisionTextDualEncoderModel"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ = ["TFVisionTextDualEncoderModel"]
if TYPE_CHECKING:
from .configuration_vision_text_dual_encoder import VisionTextDualEncoderConfig
from .processing_vision_text_dual_encoder import VisionTextDualEncoderProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_text_dual_encoder import VisionTextDualEncoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_text_dual_encoder import FlaxVisionTextDualEncoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_text_dual_encoder import TFVisionTextDualEncoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
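
# Added note on the lazy-import pattern (a sketch of the behavior): at import time
# only `_import_structure` is built and the module object is swapped for a
# _LazyModule, so importing the package itself stays cheap; a specific name such as
#   from transformers.models.vision_text_dual_encoder import VisionTextDualEncoderModel
# only materializes the heavy torch submodule when that attribute is first accessed.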
| 393 | 0 |
"""simple docstring"""
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
global_rng = random.Random()
def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor as a nested list."""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=10,
        hop_length=160,
        chunk_length=8,
        padding_value=0.0,
        sampling_rate=4000,
        return_attention_mask=False,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize
        self.feature_size = feature_size
        self.chunk_length = chunk_length
        self.hop_length = hop_length

    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "hop_length": self.hop_length,
            "chunk_length": self.chunk_length,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = WhisperFeatureExtractor if is_speech_available() else None

    def setUp(self):
        self.feat_extract_tester = WhisperFeatureExtractionTester(self)

    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_first = feat_extract_first.mel_filters
        mel_second = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(mel_first, mel_second))
        self.assertEqual(dict_first, dict_second)

    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_first = feat_extract_first.mel_filters
        mel_second = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(mel_first, mel_second))
        self.assertEqual(dict_first, dict_second)

    def test_call(self):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test feature size
        input_features = feature_extractor(np_speech_inputs, padding="max_length", return_tensors="np").input_features
        self.assertTrue(input_features.ndim == 3)
        self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames)
        self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size)

        # Test not batched input
        encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors="np").input_features
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test truncation required
        speech_inputs = [floats_list((1, x))[0] for x in range(200, (feature_extractor.n_samples + 500), 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        speech_inputs_truncated = [x[: feature_extractor.n_samples] for x in speech_inputs]
        np_speech_inputs_truncated = [np.asarray(speech_input) for speech_input in speech_inputs_truncated]

        encoded_sequences_1 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs_truncated, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100, 32).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_features.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_features.dtype == torch.float32)

    def _load_datasamples(self, num_samples):
        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]

    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_FEATURES = torch.tensor(
            [
                0.1193, -0.0946, -0.1098, -0.0196, 0.0225, -0.0690, -0.1736, 0.0951,
                0.0971, -0.0817, -0.0702, 0.0162, 0.0260, 0.0017, -0.0192, -0.1678,
                0.0709, -0.1867, -0.0655, -0.0274, -0.0234, -0.1884, -0.0516, -0.0554,
                -0.0274, -0.1425, -0.1423, 0.0837, 0.0377, -0.0854
            ]
        )
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = WhisperFeatureExtractor()
        input_features = feature_extractor(input_speech, return_tensors="pt").input_features
        self.assertEqual(input_features.shape, (1, 80, 3000))
        self.assertTrue(torch.allclose(input_features[0, 0, :30], EXPECTED_INPUT_FEATURES, atol=1e-4))

    def test_zero_mean_unit_var_normalization(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        audio = self._load_datasamples(1)[0]
        audio = ((audio - audio.min()) / (audio.max() - audio.min())) * 65535  # Rescale to [0, 65535] to show issue
        audio = feat_extract.zero_mean_unit_var_norm([audio], attention_mask=None)[0]

        self.assertTrue(np.all(np.mean(audio) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(audio) - 1) < 1e-3))
| 715 |
"""simple docstring"""
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import debug_launcher
from accelerate.test_utils import (
execute_subprocess_async,
require_cpu,
require_huggingface_suite,
require_multi_gpu,
require_single_gpu,
)
from accelerate.utils import patch_environment
@require_huggingface_suite
class MetricTester(unittest.TestCase):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ["scripts", "external_deps", "test_metrics.py"]
        )

        from accelerate.test_utils.scripts.external_deps import test_metrics  # noqa: F401

        self.test_metrics = test_metrics

    @require_cpu
    def test_metric_cpu_noop(self):
        debug_launcher(self.test_metrics.main, num_processes=1)

    @require_cpu
    def test_metric_cpu_multi(self):
        debug_launcher(self.test_metrics.main)

    @require_single_gpu
    def test_metric_gpu(self):
        self.test_metrics.main()

    @require_multi_gpu
    def test_metric_gpu_multi(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())
| 507 | 0 |
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.local_sgd import LocalSGD
########################################################################
# This is a fully working simple example to use Accelerate
# with LocalSGD, which is a method to synchronize model
# parameters every K batches. It is different, but complementary
# to gradient accumulation.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('''TESTING_MOCKED_DATALOADERS''', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps)
    local_sgd_steps = int(args.local_sgd_steps)
    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=gradient_accumulation_steps
    )
    if accelerator.distributed_type not in [DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU]:
        raise NotImplementedError("LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)")
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])

    metric = evaluate.load("glue", "mrpc")

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs),
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        with LocalSGD(
            accelerator=accelerator, model=model, local_sgd_steps=local_sgd_steps, enabled=local_sgd_steps is not None
        ) as local_sgd:
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                # New code #
                # We use the new `accumulate` context manager to perform gradient accumulation
                # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
                with accelerator.accumulate(model):
                    output = model(**batch)
                    loss = output.loss
                    accelerator.backward(loss)
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()
                    # LocalSGD-specific line
                    local_sgd.step()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    # New Code #
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="The number of minibatches to be ran before gradients are accumulated.",
    )
    parser.add_argument(
        "--local_sgd_steps", type=int, default=8, help="Number of local SGD steps or None to disable local SGD"
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
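
# --- Pattern summary (added comment; distilled from the script above) ---
# The core LocalSGD recipe is:
#   with LocalSGD(accelerator=..., model=model, local_sgd_steps=K, enabled=True) as local_sgd:
#       for batch in train_dataloader:
#           with accelerator.accumulate(model):
#               loss = model(**batch).loss
#               accelerator.backward(loss); optimizer.step(); lr_scheduler.step(); optimizer.zero_grad()
#               local_sgd.step()   # synchronizes parameters across workers every K optimizer steps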
| 84 |
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_2d_blocks_flax import (
    FlaxCrossAttnDownBlock2D,
    FlaxDownBlock2D,
    FlaxUNetMidBlock2DCrossAttn,
)
@flax.struct.dataclass
class FlaxControlNetOutput(BaseOutput):
    """Output of FlaxControlNetModel: per-resolution down-block residuals and the mid-block residual."""

    down_block_res_samples: jnp.ndarray
    mid_block_res_sample: jnp.ndarray
class FlaxControlNetConditioningEmbedding(nn.Module):
    conditioning_embedding_channels: int
    block_out_channels: Tuple[int] = (16, 32, 96, 256)
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv_in = nn.Conv(
            self.block_out_channels[0],
            kernel_size=(3, 3),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        blocks = []
        for i in range(len(self.block_out_channels) - 1):
            channel_in = self.block_out_channels[i]
            channel_out = self.block_out_channels[i + 1]
            conv1 = nn.Conv(
                channel_in,
                kernel_size=(3, 3),
                padding=((1, 1), (1, 1)),
                dtype=self.dtype,
            )
            blocks.append(conv1)
            conv2 = nn.Conv(
                channel_out,
                kernel_size=(3, 3),
                strides=(2, 2),
                padding=((1, 1), (1, 1)),
                dtype=self.dtype,
            )
            blocks.append(conv2)
        self.blocks = blocks

        self.conv_out = nn.Conv(
            self.conditioning_embedding_channels,
            kernel_size=(3, 3),
            padding=((1, 1), (1, 1)),
            kernel_init=nn.initializers.zeros_init(),
            bias_init=nn.initializers.zeros_init(),
            dtype=self.dtype,
        )

    def __call__(self, conditioning):
        embedding = self.conv_in(conditioning)
        embedding = nn.silu(embedding)

        for block in self.blocks:
            embedding = block(embedding)
            embedding = nn.silu(embedding)

        embedding = self.conv_out(embedding)
        return embedding
@flax_register_to_config
class FlaxControlNetModel(nn.Module, FlaxModelMixin, ConfigMixin):
    sample_size: int = 32
    in_channels: int = 4
    down_block_types: Tuple[str] = (
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "DownBlock2D",
    )
    only_cross_attention: Union[bool, Tuple[bool]] = False
    block_out_channels: Tuple[int] = (320, 640, 1280, 1280)
    layers_per_block: int = 2
    attention_head_dim: Union[int, Tuple[int]] = 8
    num_attention_heads: Optional[Union[int, Tuple[int]]] = None
    cross_attention_dim: int = 1280
    dropout: float = 0.0
    use_linear_projection: bool = False
    dtype: jnp.dtype = jnp.float32
    flip_sin_to_cos: bool = True
    freq_shift: int = 0
    controlnet_conditioning_channel_order: str = "rgb"
    conditioning_embedding_out_channels: Tuple[int] = (16, 32, 96, 256)

    def init_weights(self, rng: jax.random.KeyArray) -> FrozenDict:
        # init input tensors
        sample_shape = (1, self.in_channels, self.sample_size, self.sample_size)
        sample = jnp.zeros(sample_shape, dtype=jnp.float32)
        timesteps = jnp.ones((1,), dtype=jnp.int32)
        encoder_hidden_states = jnp.zeros((1, 1, self.cross_attention_dim), dtype=jnp.float32)
        controlnet_cond_shape = (1, 3, self.sample_size * 8, self.sample_size * 8)
        controlnet_cond = jnp.zeros(controlnet_cond_shape, dtype=jnp.float32)

        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {"params": params_rng, "dropout": dropout_rng}

        return self.init(rngs, sample, timesteps, encoder_hidden_states, controlnet_cond)["params"]

    def setup(self):
        block_out_channels = self.block_out_channels
        time_embed_dim = block_out_channels[0] * 4

        # If `num_attention_heads` is not defined (which is the case for most models)
        # it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
        # The reason for this behavior is to correct for incorrectly named variables that were introduced
        # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
        # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
        # which is why we correct for the naming here.
        num_attention_heads = self.num_attention_heads or self.attention_head_dim

        # input
        self.conv_in = nn.Conv(
            block_out_channels[0],
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        # time
        self.time_proj = FlaxTimesteps(
            block_out_channels[0], flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.config.freq_shift
        )
        self.time_embedding = FlaxTimestepEmbedding(time_embed_dim, dtype=self.dtype)

        self.controlnet_cond_embedding = FlaxControlNetConditioningEmbedding(
            conditioning_embedding_channels=block_out_channels[0],
            block_out_channels=self.conditioning_embedding_out_channels,
        )

        only_cross_attention = self.only_cross_attention
        if isinstance(only_cross_attention, bool):
            only_cross_attention = (only_cross_attention,) * len(self.down_block_types)

        if isinstance(num_attention_heads, int):
            num_attention_heads = (num_attention_heads,) * len(self.down_block_types)

        # down
        down_blocks = []
        controlnet_down_blocks = []

        output_channel = block_out_channels[0]

        controlnet_block = nn.Conv(
            output_channel,
            kernel_size=(1, 1),
            padding="VALID",
            kernel_init=nn.initializers.zeros_init(),
            bias_init=nn.initializers.zeros_init(),
            dtype=self.dtype,
        )
        controlnet_down_blocks.append(controlnet_block)

        for i, down_block_type in enumerate(self.down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            if down_block_type == "CrossAttnDownBlock2D":
                down_block = FlaxCrossAttnDownBlock2D(
                    in_channels=input_channel,
                    out_channels=output_channel,
                    dropout=self.dropout,
                    num_layers=self.layers_per_block,
                    num_attention_heads=num_attention_heads[i],
                    add_downsample=not is_final_block,
                    use_linear_projection=self.use_linear_projection,
                    only_cross_attention=only_cross_attention[i],
                    dtype=self.dtype,
                )
            else:
                down_block = FlaxDownBlock2D(
                    in_channels=input_channel,
                    out_channels=output_channel,
                    dropout=self.dropout,
                    num_layers=self.layers_per_block,
                    add_downsample=not is_final_block,
                    dtype=self.dtype,
                )

            down_blocks.append(down_block)

            for _ in range(self.layers_per_block):
                controlnet_block = nn.Conv(
                    output_channel,
                    kernel_size=(1, 1),
                    padding="VALID",
                    kernel_init=nn.initializers.zeros_init(),
                    bias_init=nn.initializers.zeros_init(),
                    dtype=self.dtype,
                )
                controlnet_down_blocks.append(controlnet_block)

            if not is_final_block:
                controlnet_block = nn.Conv(
                    output_channel,
                    kernel_size=(1, 1),
                    padding="VALID",
                    kernel_init=nn.initializers.zeros_init(),
                    bias_init=nn.initializers.zeros_init(),
                    dtype=self.dtype,
                )
                controlnet_down_blocks.append(controlnet_block)

        self.down_blocks = down_blocks
        self.controlnet_down_blocks = controlnet_down_blocks

        # mid
        mid_block_channel = block_out_channels[-1]
        self.mid_block = FlaxUNetMidBlock2DCrossAttn(
            in_channels=mid_block_channel,
            dropout=self.dropout,
            num_attention_heads=num_attention_heads[-1],
            use_linear_projection=self.use_linear_projection,
            dtype=self.dtype,
        )

        self.controlnet_mid_block = nn.Conv(
            mid_block_channel,
            kernel_size=(1, 1),
            padding="VALID",
            kernel_init=nn.initializers.zeros_init(),
            bias_init=nn.initializers.zeros_init(),
            dtype=self.dtype,
        )

    def __call__(
        self,
        sample,
        timesteps,
        encoder_hidden_states,
        controlnet_cond,
        conditioning_scale: float = 1.0,
        return_dict: bool = True,
        train: bool = False,
    ):
        channel_order = self.controlnet_conditioning_channel_order
        if channel_order == "bgr":
            controlnet_cond = jnp.flip(controlnet_cond, axis=1)

        # 1. time
        if not isinstance(timesteps, jnp.ndarray):
            timesteps = jnp.array([timesteps], dtype=jnp.int32)
        elif isinstance(timesteps, jnp.ndarray) and len(timesteps.shape) == 0:
            timesteps = timesteps.astype(dtype=jnp.float32)
            timesteps = jnp.expand_dims(timesteps, 0)

        t_emb = self.time_proj(timesteps)
        t_emb = self.time_embedding(t_emb)

        # 2. pre-process
        sample = jnp.transpose(sample, (0, 2, 3, 1))
        sample = self.conv_in(sample)

        controlnet_cond = jnp.transpose(controlnet_cond, (0, 2, 3, 1))
        controlnet_cond = self.controlnet_cond_embedding(controlnet_cond)
        sample += controlnet_cond

        # 3. down
        down_block_res_samples = (sample,)
        for down_block in self.down_blocks:
            if isinstance(down_block, FlaxCrossAttnDownBlock2D):
                sample, res_samples = down_block(sample, t_emb, encoder_hidden_states, deterministic=not train)
            else:
                sample, res_samples = down_block(sample, t_emb, deterministic=not train)
            down_block_res_samples += res_samples

        # 4. mid
        sample = self.mid_block(sample, t_emb, encoder_hidden_states, deterministic=not train)

        # 5. controlnet blocks
        controlnet_down_block_res_samples = ()
        for down_block_res_sample, controlnet_block in zip(down_block_res_samples, self.controlnet_down_blocks):
            down_block_res_sample = controlnet_block(down_block_res_sample)
            controlnet_down_block_res_samples += (down_block_res_sample,)

        down_block_res_samples = controlnet_down_block_res_samples

        mid_block_res_sample = self.controlnet_mid_block(sample)

        # 6. scaling
        down_block_res_samples = [sample * conditioning_scale for sample in down_block_res_samples]
        mid_block_res_sample *= conditioning_scale

        if not return_dict:
            return (down_block_res_samples, mid_block_res_sample)

        return FlaxControlNetOutput(
            down_block_res_samples=down_block_res_samples, mid_block_res_sample=mid_block_res_sample
        )
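
# --- Illustrative initialization (an added, hedged sketch; not in the original file) ---
# Assumes jax/flax are installed; shapes follow init_weights above.
#   controlnet = FlaxControlNetModel(block_out_channels=(32, 64))   # small config for illustration
#   params = controlnet.init_weights(jax.random.PRNGKey(0))
# `params` is then passed as {"params": params} to `controlnet.apply(...)` together
# with (sample, timesteps, encoder_hidden_states, controlnet_cond).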
| 84 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_blip_2": [
"BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP",
"Blip2Config",
"Blip2QFormerConfig",
"Blip2VisionConfig",
],
"processing_blip_2": ["Blip2Processor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blip_2"] = [
"BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST",
"Blip2Model",
"Blip2QFormerModel",
"Blip2PreTrainedModel",
"Blip2ForConditionalGeneration",
"Blip2VisionModel",
]
if TYPE_CHECKING:
    from .configuration_blip_2 import (
        BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Blip2Config,
        Blip2QFormerConfig,
        Blip2VisionConfig,
    )
    from .processing_blip_2 import Blip2Processor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_blip_2 import (
            BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Blip2ForConditionalGeneration,
            Blip2Model,
            Blip2PreTrainedModel,
            Blip2QFormerModel,
            Blip2VisionModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 284 |
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}
# See all LED models at https://huggingface.co/models?filter=LED
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
},
"merges_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
},
"tokenizer_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"allenai/led-base-16384": 16_384,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def bytes_to_unicode():
    """
    Returns a mapping from utf-8 bytes to printable unicode strings, so that BPE can
    operate on sequences that contain no whitespace or control characters.
    """
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))
def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
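
# Worked example (added comment): bytes_to_unicode maps every byte 0-255 to a
# printable unicode character, e.g. the space byte (32) maps to "Ġ", so
# "hello world" byte-encodes to "helloĠworld" before BPE is applied.
# get_pairs(("h", "e", "l", "l", "o")) returns
# {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}.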
class LEDTokenizer(PreTrainedTokenizer):
    """Constructs a LED tokenizer, derived from the GPT-2 byte-level BPE tokenizer."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""")
    @property
    # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) in a single string."""
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)
    def _pad(
        self,
        encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
        max_length: Optional[int] = None,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
    ) -> dict:
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs,
            max_length=max_length,
            padding_strategy=padding_strategy,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
        )

        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = "attention_mask" in self.model_input_names

        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs["global_attention_mask"]) != len(required_input)

            if needs_to_be_padded:
                difference = len(required_input) - len(encoded_inputs["global_attention_mask"])

                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs["global_attention_mask"] = (
                        encoded_inputs["global_attention_mask"] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs["global_attention_mask"] = [-1] * difference + encoded_inputs[
                        "global_attention_mask"
                    ]
                else:
                    raise ValueError("Invalid padding strategy:" + str(self.padding_side))

        return encoded_inputs
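
# Behavior note with a tiny example (added; a sketch, not in the original file):
# if input_ids is padded from length 3 to length 6 on the right, a
# global_attention_mask of [1, 0, 0] becomes [1, 0, 0, -1, -1, -1] -- padded with
# -1 rather than 0, because 0 already means "local attention" in LED's convention.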
| 284 | 1 |