import argparse
from pathlib import Path

from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration


def consolidate(
    model_type,
    generator_name_or_path: str,
    question_encoder_name_or_path: str,
    dest_dir: Path,
    config_name_or_path: str = None,
    generator_tokenizer_name_or_path: str = None,
    question_encoder_tokenizer_name_or_path: str = None,
):
    if config_name_or_path is None:
        config_name_or_path = "facebook/rag-token-base" if model_type == "rag_token" else "facebook/rag-sequence-base"

    if generator_tokenizer_name_or_path is None:
        generator_tokenizer_name_or_path = generator_name_or_path

    if question_encoder_tokenizer_name_or_path is None:
        question_encoder_tokenizer_name_or_path = question_encoder_name_or_path

    model_class = RagTokenForGeneration if model_type == "rag_token" else RagSequenceForGeneration

    # Save model.
    rag_config = RagConfig.from_pretrained(config_name_or_path)
    gen_config = AutoConfig.from_pretrained(generator_name_or_path)
    question_encoder_config = AutoConfig.from_pretrained(question_encoder_name_or_path)

    rag_config.generator = gen_config
    rag_config.question_encoder = question_encoder_config

    rag_model = model_class.from_pretrained_question_encoder_generator(
        question_encoder_name_or_path, generator_name_or_path, config=rag_config
    )
    rag_model.save_pretrained(dest_dir)

    # Sanity check.
    model_class.from_pretrained(dest_dir)

    # Save tokenizers.
    gen_tokenizer = AutoTokenizer.from_pretrained(generator_tokenizer_name_or_path)
    gen_tokenizer.save_pretrained(dest_dir / "generator_tokenizer/")
    question_encoder_tokenizer = AutoTokenizer.from_pretrained(question_encoder_tokenizer_name_or_path)
    question_encoder_tokenizer.save_pretrained(dest_dir / "question_encoder_tokenizer/")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_type",
        choices=["rag_sequence", "rag_token"],
        required=True,
        type=str,
        help="RAG model type: rag_sequence, rag_token",
    )
    parser.add_argument("--dest", type=str, required=True, help="Path to the output checkpoint directory.")
    parser.add_argument("--generator_name_or_path", type=str, required=True, help="Generator model identifier")
    parser.add_argument(
        "--question_encoder_name_or_path", type=str, required=True, help="Question encoder model identifier"
    )
    parser.add_argument(
        "--generator_tokenizer_name_or_path",
        type=str,
        help="Generator tokenizer identifier, if not specified, resolves to ``generator_name_or_path``",
    )
    parser.add_argument(
        "--question_encoder_tokenizer_name_or_path",
        type=str,
        help="Question encoder tokenizer identifier, if not specified, resolves to ``question_encoder_name_or_path``",
    )
    parser.add_argument(
        "--config_name_or_path",
        type=str,
        help=(
            "Identifier of the model config to use, if not provided, resolves to a base config for a given"
            " ``model_type``"
        ),
    )

    args = parser.parse_args()

    dest_dir = Path(args.dest)
    dest_dir.mkdir(exist_ok=True)

    consolidate(
        args.model_type,
        args.generator_name_or_path,
        args.question_encoder_name_or_path,
        dest_dir,
        args.config_name_or_path,
        args.generator_tokenizer_name_or_path,
        args.question_encoder_tokenizer_name_or_path,
    )
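A sketch of how this script might be invoked, assuming it is saved as consolidate_rag_checkpoint.py (the file name and model identifiers below are illustrative, not taken from this file):

python consolidate_rag_checkpoint.py \
    --model_type rag_sequence \
    --generator_name_or_path facebook/bart-large-cnn \
    --question_encoder_name_or_path facebook/dpr-question_encoder-single-nq-base \
    --dest ./rag-consolidated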
import argparse
import json
from pathlib import Path

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import (
    BertTokenizer,
    ViltConfig,
    ViltForImageAndTextRetrieval,
    ViltForImagesAndTextClassification,
    ViltForMaskedLM,
    ViltForQuestionAnswering,
    ViltImageProcessor,
    ViltProcessor,
)
from transformers.utils import logging

logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def create_rename_keys(config, vqa_model=False, nlvr_model=False, irtr_model=False):
    rename_keys = []
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"transformer.blocks.{i}.norm1.weight", f"vilt.encoder.layer.{i}.layernorm_before.weight"))
        rename_keys.append((f"transformer.blocks.{i}.norm1.bias", f"vilt.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append(
            (f"transformer.blocks.{i}.attn.proj.weight", f"vilt.encoder.layer.{i}.attention.output.dense.weight")
        )
        rename_keys.append(
            (f"transformer.blocks.{i}.attn.proj.bias", f"vilt.encoder.layer.{i}.attention.output.dense.bias")
        )
        rename_keys.append((f"transformer.blocks.{i}.norm2.weight", f"vilt.encoder.layer.{i}.layernorm_after.weight"))
        rename_keys.append((f"transformer.blocks.{i}.norm2.bias", f"vilt.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append(
            (f"transformer.blocks.{i}.mlp.fc1.weight", f"vilt.encoder.layer.{i}.intermediate.dense.weight")
        )
        rename_keys.append((f"transformer.blocks.{i}.mlp.fc1.bias", f"vilt.encoder.layer.{i}.intermediate.dense.bias"))
        rename_keys.append((f"transformer.blocks.{i}.mlp.fc2.weight", f"vilt.encoder.layer.{i}.output.dense.weight"))
        rename_keys.append((f"transformer.blocks.{i}.mlp.fc2.bias", f"vilt.encoder.layer.{i}.output.dense.bias"))

    # embeddings
    rename_keys.extend(
        [
            # text embeddings
            ("text_embeddings.word_embeddings.weight", "vilt.embeddings.text_embeddings.word_embeddings.weight"),
            (
                "text_embeddings.position_embeddings.weight",
                "vilt.embeddings.text_embeddings.position_embeddings.weight",
            ),
            ("text_embeddings.position_ids", "vilt.embeddings.text_embeddings.position_ids"),
            (
                "text_embeddings.token_type_embeddings.weight",
                "vilt.embeddings.text_embeddings.token_type_embeddings.weight",
            ),
            ("text_embeddings.LayerNorm.weight", "vilt.embeddings.text_embeddings.LayerNorm.weight"),
            ("text_embeddings.LayerNorm.bias", "vilt.embeddings.text_embeddings.LayerNorm.bias"),
            # patch embeddings
            ("transformer.cls_token", "vilt.embeddings.cls_token"),
            ("transformer.patch_embed.proj.weight", "vilt.embeddings.patch_embeddings.projection.weight"),
            ("transformer.patch_embed.proj.bias", "vilt.embeddings.patch_embeddings.projection.bias"),
            ("transformer.pos_embed", "vilt.embeddings.position_embeddings"),
            # token type embeddings
            ("token_type_embeddings.weight", "vilt.embeddings.token_type_embeddings.weight"),
        ]
    )

    # final layernorm + pooler
    rename_keys.extend(
        [
            ("transformer.norm.weight", "vilt.layernorm.weight"),
            ("transformer.norm.bias", "vilt.layernorm.bias"),
            ("pooler.dense.weight", "vilt.pooler.dense.weight"),
            ("pooler.dense.bias", "vilt.pooler.dense.bias"),
        ]
    )

    # classifier head(s)
    if vqa_model:
        # classification head
        rename_keys.extend(
            [
                ("vqa_classifier.0.weight", "classifier.0.weight"),
                ("vqa_classifier.0.bias", "classifier.0.bias"),
                ("vqa_classifier.1.weight", "classifier.1.weight"),
                ("vqa_classifier.1.bias", "classifier.1.bias"),
                ("vqa_classifier.3.weight", "classifier.3.weight"),
                ("vqa_classifier.3.bias", "classifier.3.bias"),
            ]
        )
    elif nlvr_model:
        # classification head
        rename_keys.extend(
            [
                ("nlvr2_classifier.0.weight", "classifier.0.weight"),
                ("nlvr2_classifier.0.bias", "classifier.0.bias"),
                ("nlvr2_classifier.1.weight", "classifier.1.weight"),
                ("nlvr2_classifier.1.bias", "classifier.1.bias"),
                ("nlvr2_classifier.3.weight", "classifier.3.weight"),
                ("nlvr2_classifier.3.bias", "classifier.3.bias"),
            ]
        )
    else:
        pass

    return rename_keys


def read_in_q_k_v(state_dict, config):
    for i in range(config.num_hidden_layers):
        prefix = "vilt."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"transformer.blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"transformer.blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[prefix + f"encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[prefix + f"encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[prefix + f"encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[prefix + f"encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[prefix + f"encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[prefix + f"encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]


def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


@torch.no_grad()
def convert_vilt_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    config = ViltConfig(image_size=384, patch_size=32, tie_word_embeddings=False)
    mlm_model = False
    vqa_model = False
    nlvr_model = False
    irtr_model = False
    if "vqa" in checkpoint_url:
        vqa_model = True
        config.num_labels = 3129
        repo_id = "huggingface/label-files"
        filename = "vqa2-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        model = ViltForQuestionAnswering(config)
    elif "nlvr" in checkpoint_url:
        nlvr_model = True
        config.num_labels = 2
        config.id2label = {0: "False", 1: "True"}
        config.label2id = {v: k for k, v in config.id2label.items()}
        config.modality_type_vocab_size = 3
        model = ViltForImagesAndTextClassification(config)
    elif "irtr" in checkpoint_url:
        irtr_model = True
        model = ViltForImageAndTextRetrieval(config)
    elif "mlm_itm" in checkpoint_url:
        mlm_model = True
        model = ViltForMaskedLM(config)
    else:
        raise ValueError("Unknown model type")

    # load state_dict of original model, remove and rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["state_dict"]
    rename_keys = create_rename_keys(config, vqa_model, nlvr_model, irtr_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config)
    if mlm_model or irtr_model:
        ignore_keys = ["itm_score.fc.weight", "itm_score.fc.bias"]
        for k in ignore_keys:
            state_dict.pop(k, None)

    # load state dict into HuggingFace model
    model.eval()
    if mlm_model:
        missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
        assert missing_keys == ["mlm_score.decoder.bias"]
    else:
        model.load_state_dict(state_dict)

    # Define processor
    image_processor = ViltImageProcessor(size=384)
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
    processor = ViltProcessor(image_processor, tokenizer)

    # Forward pass on example inputs (image + text)
    if nlvr_model:
        image1 = Image.open(requests.get("https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg", stream=True).raw)
        image2 = Image.open(requests.get("https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg", stream=True).raw)
        text = (
            "The left image contains twice the number of dogs as the right image, and at least two dogs in total are"
            " standing."
        )
        encoding_1 = processor(image1, text, return_tensors="pt")
        encoding_2 = processor(image2, text, return_tensors="pt")
        outputs = model(
            input_ids=encoding_1.input_ids,
            pixel_values=encoding_1.pixel_values,
            pixel_values_2=encoding_2.pixel_values,
        )
    else:
        image = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw)
        if mlm_model:
            text = "a bunch of [MASK] laying on a [MASK]."
        else:
            text = "How many cats are there?"
        encoding = processor(image, text, return_tensors="pt")
        outputs = model(**encoding)

    # Verify outputs
    if mlm_model:
        expected_shape = torch.Size([1, 11, 30522])
        expected_slice = torch.tensor([-12.5061, -12.5123, -12.5174])
        assert outputs.logits.shape == expected_shape
        assert torch.allclose(outputs.logits[0, 0, :3], expected_slice, atol=1e-4)

        # verify masked token prediction equals "cats"
        predicted_id = outputs.logits[0, 4, :].argmax(-1).item()
        assert tokenizer.decode([predicted_id]) == "cats"
    elif vqa_model:
        expected_shape = torch.Size([1, 3129])
        expected_slice = torch.tensor([-15.9495, -18.1472, -10.3041])
        assert outputs.logits.shape == expected_shape
        assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)

        # verify vqa prediction equals "2"
        predicted_idx = outputs.logits.argmax(-1).item()
        assert model.config.id2label[predicted_idx] == "2"
    elif nlvr_model:
        expected_shape = torch.Size([1, 2])
        expected_slice = torch.tensor([-2.8721, 2.1291])
        assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)
        assert outputs.logits.shape == expected_shape

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model and processor to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    processor.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint_url",
        default="https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt",
        type=str,
        help="URL of the checkpoint you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )

    args = parser.parse_args()
    convert_vilt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
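A possible invocation, assuming the script is saved as convert_vilt_checkpoint.py (the file name is an assumption; the URL is the script's own default):

python convert_vilt_checkpoint.py \
    --checkpoint_url https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt \
    --pytorch_dump_folder_path ./vilt-mlm-itm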
import json
import os
from typing import Optional

import numpy as np

from ...feature_extraction_utils import BatchFeature
from ...processing_utils import ProcessorMixin
from ...utils import logging
from ...utils.hub import get_file_from_repo
from ..auto import AutoTokenizer

logger = logging.get_logger(__name__)


class BarkProcessor(ProcessorMixin):
    tokenizer_class = "AutoTokenizer"
    attributes = ["tokenizer"]

    preset_shape = {
        "semantic_prompt": 1,
        "coarse_prompt": 2,
        "fine_prompt": 2,
    }

    def __init__(self, tokenizer, speaker_embeddings=None):
        super().__init__(tokenizer)

        self.speaker_embeddings = speaker_embeddings

    @classmethod
    def from_pretrained(
        cls, pretrained_processor_name_or_path, speaker_embeddings_dict_path="speaker_embeddings_path.json", **kwargs
    ):
        if speaker_embeddings_dict_path is not None:
            speaker_embeddings_path = get_file_from_repo(
                pretrained_processor_name_or_path,
                speaker_embeddings_dict_path,
                subfolder=kwargs.pop("subfolder", None),
                cache_dir=kwargs.pop("cache_dir", None),
                force_download=kwargs.pop("force_download", False),
                proxies=kwargs.pop("proxies", None),
                resume_download=kwargs.pop("resume_download", False),
                local_files_only=kwargs.pop("local_files_only", False),
                use_auth_token=kwargs.pop("use_auth_token", None),
                revision=kwargs.pop("revision", None),
            )
            if speaker_embeddings_path is None:
                logger.warning(
                    f"`{os.path.join(pretrained_processor_name_or_path, speaker_embeddings_dict_path)}` does not"
                    " exist, no preloaded speaker embeddings will be used - Make sure to provide a correct path to"
                    " the json dictionary if wanted, otherwise set `speaker_embeddings_dict_path=None`."
                )
                speaker_embeddings = None
            else:
                with open(speaker_embeddings_path) as speaker_embeddings_json:
                    speaker_embeddings = json.load(speaker_embeddings_json)
        else:
            speaker_embeddings = None

        tokenizer = AutoTokenizer.from_pretrained(pretrained_processor_name_or_path, **kwargs)

        return cls(tokenizer=tokenizer, speaker_embeddings=speaker_embeddings)

    def save_pretrained(
        self,
        save_directory,
        speaker_embeddings_dict_path="speaker_embeddings_path.json",
        speaker_embeddings_directory="speaker_embeddings",
        push_to_hub: bool = False,
        **kwargs,
    ):
        if self.speaker_embeddings is not None:
            os.makedirs(os.path.join(save_directory, speaker_embeddings_directory, "v2"), exist_ok=True)

            embeddings_dict = {}
            embeddings_dict["repo_or_path"] = save_directory

            for prompt_key in self.speaker_embeddings:
                if prompt_key != "repo_or_path":
                    voice_preset = self._load_voice_preset(prompt_key)

                    tmp_dict = {}
                    for key in self.speaker_embeddings[prompt_key]:
                        np.save(
                            os.path.join(
                                embeddings_dict["repo_or_path"], speaker_embeddings_directory, f"{prompt_key}_{key}"
                            ),
                            voice_preset[key],
                            allow_pickle=False,
                        )
                        tmp_dict[key] = os.path.join(speaker_embeddings_directory, f"{prompt_key}_{key}.npy")

                    embeddings_dict[prompt_key] = tmp_dict

            with open(os.path.join(save_directory, speaker_embeddings_dict_path), "w") as fp:
                json.dump(embeddings_dict, fp)

        super().save_pretrained(save_directory, push_to_hub, **kwargs)

    def _load_voice_preset(self, voice_preset: str = None, **kwargs):
        voice_preset_paths = self.speaker_embeddings[voice_preset]

        voice_preset_dict = {}
        for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
            if key not in voice_preset_paths:
                raise ValueError(
                    f"Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}]."
                )

            path = get_file_from_repo(
                self.speaker_embeddings.get("repo_or_path", "/"),
                voice_preset_paths[key],
                subfolder=kwargs.pop("subfolder", None),
                cache_dir=kwargs.pop("cache_dir", None),
                force_download=kwargs.pop("force_download", False),
                proxies=kwargs.pop("proxies", None),
                resume_download=kwargs.pop("resume_download", False),
                local_files_only=kwargs.pop("local_files_only", False),
                use_auth_token=kwargs.pop("use_auth_token", None),
                revision=kwargs.pop("revision", None),
            )
            if path is None:
                raise ValueError(
                    f'`{os.path.join(self.speaker_embeddings.get("repo_or_path", "/"), voice_preset_paths[key])}`'
                    f" does not exist, no preloaded voice preset will be used - Make sure to provide correct paths"
                    f" to the {voice_preset} embeddings."
                )

            voice_preset_dict[key] = np.load(path)

        return voice_preset_dict

    def _validate_voice_preset_dict(self, voice_preset: Optional[dict] = None):
        for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
            if key not in voice_preset:
                raise ValueError(f"Voice preset unrecognized, missing {key} as a key.")

            if not isinstance(voice_preset[key], np.ndarray):
                raise ValueError(f"{key} voice preset must be a {str(self.preset_shape[key])}D ndarray.")

            if len(voice_preset[key].shape) != self.preset_shape[key]:
                raise ValueError(f"{key} voice preset must be a {str(self.preset_shape[key])}D ndarray.")

    def __call__(
        self,
        text=None,
        voice_preset=None,
        return_tensors="pt",
        max_length=256,
        add_special_tokens=False,
        return_attention_mask=True,
        return_token_type_ids=False,
        **kwargs,
    ):
        if voice_preset is not None and not isinstance(voice_preset, dict):
            if (
                isinstance(voice_preset, str)
                and self.speaker_embeddings is not None
                and voice_preset in self.speaker_embeddings
            ):
                voice_preset = self._load_voice_preset(voice_preset)
            else:
                if isinstance(voice_preset, str) and not voice_preset.endswith(".npz"):
                    voice_preset = voice_preset + ".npz"
                voice_preset = np.load(voice_preset)

        if voice_preset is not None:
            self._validate_voice_preset_dict(voice_preset, **kwargs)
            voice_preset = BatchFeature(data=voice_preset, tensor_type=return_tensors)

        encoded_text = self.tokenizer(
            text,
            return_tensors=return_tensors,
            padding="max_length",
            max_length=max_length,
            return_attention_mask=return_attention_mask,
            return_token_type_ids=return_token_type_ids,
            add_special_tokens=add_special_tokens,
            **kwargs,
        )

        if voice_preset is not None:
            encoded_text["history_prompt"] = voice_preset

        return encoded_text
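A minimal usage sketch for the processor above; the checkpoint name and voice preset are illustrative values, not taken from this file:

from transformers import BarkProcessor

processor = BarkProcessor.from_pretrained("suno/bark-small")
inputs = processor("Hello, my dog is cute", voice_preset="v2/en_speaker_6")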
import argparse
import json
import os

import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image

from transformers import (
    EfficientNetConfig,
    EfficientNetForImageClassification,
    EfficientNetImageProcessor,
)
from transformers.utils import logging

logging.set_verbosity_info()
logger = logging.get_logger(__name__)

model_classes = {
    "b0": efficientnet.EfficientNetB0,
    "b1": efficientnet.EfficientNetB1,
    "b2": efficientnet.EfficientNetB2,
    "b3": efficientnet.EfficientNetB3,
    "b4": efficientnet.EfficientNetB4,
    "b5": efficientnet.EfficientNetB5,
    "b6": efficientnet.EfficientNetB6,
    "b7": efficientnet.EfficientNetB7,
}

CONFIG_MAP = {
    "b0": {
        "hidden_dim": 1280,
        "width_coef": 1.0,
        "depth_coef": 1.0,
        "image_size": 224,
        "dropout_rate": 0.2,
        "dw_padding": [],
    },
    "b1": {
        "hidden_dim": 1280,
        "width_coef": 1.0,
        "depth_coef": 1.1,
        "image_size": 240,
        "dropout_rate": 0.2,
        "dw_padding": [16],
    },
    "b2": {
        "hidden_dim": 1408,
        "width_coef": 1.1,
        "depth_coef": 1.2,
        "image_size": 260,
        "dropout_rate": 0.3,
        "dw_padding": [5, 8, 16],
    },
    "b3": {
        "hidden_dim": 1536,
        "width_coef": 1.2,
        "depth_coef": 1.4,
        "image_size": 300,
        "dropout_rate": 0.3,
        "dw_padding": [5, 18],
    },
    "b4": {
        "hidden_dim": 1792,
        "width_coef": 1.4,
        "depth_coef": 1.8,
        "image_size": 380,
        "dropout_rate": 0.4,
        "dw_padding": [6],
    },
    "b5": {
        "hidden_dim": 2048,
        "width_coef": 1.6,
        "depth_coef": 2.2,
        "image_size": 456,
        "dropout_rate": 0.4,
        "dw_padding": [13, 27],
    },
    "b6": {
        "hidden_dim": 2304,
        "width_coef": 1.8,
        "depth_coef": 2.6,
        "image_size": 528,
        "dropout_rate": 0.5,
        "dw_padding": [31],
    },
    "b7": {
        "hidden_dim": 2560,
        "width_coef": 2.0,
        "depth_coef": 3.1,
        "image_size": 600,
        "dropout_rate": 0.5,
        "dw_padding": [18],
    },
}


def get_efficientnet_config(model_name):
    config = EfficientNetConfig()
    config.hidden_dim = CONFIG_MAP[model_name]["hidden_dim"]
    config.width_coefficient = CONFIG_MAP[model_name]["width_coef"]
    config.depth_coefficient = CONFIG_MAP[model_name]["depth_coef"]
    config.image_size = CONFIG_MAP[model_name]["image_size"]
    config.dropout_rate = CONFIG_MAP[model_name]["dropout_rate"]
    config.depthwise_padding = CONFIG_MAP[model_name]["dw_padding"]

    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    config.num_labels = 1000
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}

    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


def convert_image_processor(model_name):
    size = CONFIG_MAP[model_name]["image_size"]
    preprocessor = EfficientNetImageProcessor(
        size={"height": size, "width": size},
        image_mean=[0.485, 0.456, 0.406],
        image_std=[0.47853944, 0.4732864, 0.47434163],
        do_center_crop=False,
    )
    return preprocessor


def rename_keys(original_param_names):
    block_names = [v.split("_")[0].split("block")[1] for v in original_param_names if v.startswith("block")]
    block_names = sorted(set(block_names))
    num_blocks = len(block_names)
    block_name_mapping = {b: str(i) for b, i in zip(block_names, range(num_blocks))}

    rename_keys = []
    rename_keys.append(("stem_conv/kernel:0", "embeddings.convolution.weight"))
    rename_keys.append(("stem_bn/gamma:0", "embeddings.batchnorm.weight"))
    rename_keys.append(("stem_bn/beta:0", "embeddings.batchnorm.bias"))
    rename_keys.append(("stem_bn/moving_mean:0", "embeddings.batchnorm.running_mean"))
    rename_keys.append(("stem_bn/moving_variance:0", "embeddings.batchnorm.running_var"))

    for b in block_names:
        hf_b = block_name_mapping[b]
        rename_keys.append((f"block{b}_expand_conv/kernel:0", f"encoder.blocks.{hf_b}.expansion.expand_conv.weight"))
        rename_keys.append((f"block{b}_expand_bn/gamma:0", f"encoder.blocks.{hf_b}.expansion.expand_bn.weight"))
        rename_keys.append((f"block{b}_expand_bn/beta:0", f"encoder.blocks.{hf_b}.expansion.expand_bn.bias"))
        rename_keys.append(
            (f"block{b}_expand_bn/moving_mean:0", f"encoder.blocks.{hf_b}.expansion.expand_bn.running_mean")
        )
        rename_keys.append(
            (f"block{b}_expand_bn/moving_variance:0", f"encoder.blocks.{hf_b}.expansion.expand_bn.running_var")
        )
        rename_keys.append(
            (f"block{b}_dwconv/depthwise_kernel:0", f"encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight")
        )
        rename_keys.append((f"block{b}_bn/gamma:0", f"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight"))
        rename_keys.append((f"block{b}_bn/beta:0", f"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias"))
        rename_keys.append(
            (f"block{b}_bn/moving_mean:0", f"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean")
        )
        rename_keys.append(
            (f"block{b}_bn/moving_variance:0", f"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var")
        )
        rename_keys.append((f"block{b}_se_reduce/kernel:0", f"encoder.blocks.{hf_b}.squeeze_excite.reduce.weight"))
        rename_keys.append((f"block{b}_se_reduce/bias:0", f"encoder.blocks.{hf_b}.squeeze_excite.reduce.bias"))
        rename_keys.append((f"block{b}_se_expand/kernel:0", f"encoder.blocks.{hf_b}.squeeze_excite.expand.weight"))
        rename_keys.append((f"block{b}_se_expand/bias:0", f"encoder.blocks.{hf_b}.squeeze_excite.expand.bias"))
        rename_keys.append(
            (f"block{b}_project_conv/kernel:0", f"encoder.blocks.{hf_b}.projection.project_conv.weight")
        )
        rename_keys.append((f"block{b}_project_bn/gamma:0", f"encoder.blocks.{hf_b}.projection.project_bn.weight"))
        rename_keys.append((f"block{b}_project_bn/beta:0", f"encoder.blocks.{hf_b}.projection.project_bn.bias"))
        rename_keys.append(
            (f"block{b}_project_bn/moving_mean:0", f"encoder.blocks.{hf_b}.projection.project_bn.running_mean")
        )
        rename_keys.append(
            (f"block{b}_project_bn/moving_variance:0", f"encoder.blocks.{hf_b}.projection.project_bn.running_var")
        )

    rename_keys.append(("top_conv/kernel:0", "encoder.top_conv.weight"))
    rename_keys.append(("top_bn/gamma:0", "encoder.top_bn.weight"))
    rename_keys.append(("top_bn/beta:0", "encoder.top_bn.bias"))
    rename_keys.append(("top_bn/moving_mean:0", "encoder.top_bn.running_mean"))
    rename_keys.append(("top_bn/moving_variance:0", "encoder.top_bn.running_var"))

    key_mapping = {}
    for item in rename_keys:
        if item[0] in original_param_names:
            key_mapping[item[0]] = "efficientnet." + item[1]

    # classification head (the source keys for these two entries were elided in the original dump;
    # "predictions/*" follows the Keras EfficientNet layer naming)
    key_mapping["predictions/kernel:0"] = "classifier.weight"
    key_mapping["predictions/bias:0"] = "classifier.bias"
    return key_mapping


def replace_params(hf_params, tf_params, key_mapping):
    for key, value in tf_params.items():
        if "normalization" in key:
            continue

        hf_key = key_mapping[key]
        if "_conv" in key and "kernel" in key:
            new_hf_value = torch.from_numpy(value).permute(3, 2, 0, 1)
        elif "depthwise_kernel" in key:
            new_hf_value = torch.from_numpy(value).permute(2, 3, 0, 1)
        elif "kernel" in key:
            new_hf_value = torch.from_numpy(np.transpose(value))
        else:
            new_hf_value = torch.from_numpy(value)

        # Replace HF parameters with original TF model parameters
        assert hf_params[hf_key].shape == new_hf_value.shape
        hf_params[hf_key].copy_(new_hf_value)


@torch.no_grad()
def convert_efficientnet_checkpoint(model_name, pytorch_dump_folder_path, save_model, push_to_hub):
    original_model = model_classes[model_name](
        include_top=True,
        weights="imagenet",
        input_tensor=None,
        input_shape=None,
        pooling=None,
        classes=1000,
        classifier_activation="softmax",
    )

    tf_params = original_model.trainable_variables
    tf_non_train_params = original_model.non_trainable_variables
    tf_params = {param.name: param.numpy() for param in tf_params}
    for param in tf_non_train_params:
        tf_params[param.name] = param.numpy()
    tf_param_names = list(tf_params.keys())

    # Load HuggingFace model
    config = get_efficientnet_config(model_name)
    hf_model = EfficientNetForImageClassification(config).eval()
    hf_params = hf_model.state_dict()

    # Create src-to-dst parameter name mapping dictionary
    print("Converting parameters...")
    key_mapping = rename_keys(tf_param_names)
    replace_params(hf_params, tf_params, key_mapping)

    # Initialize preprocessor and preprocess input image
    preprocessor = convert_image_processor(model_name)
    inputs = preprocessor(images=prepare_img(), return_tensors="pt")

    # HF model inference
    hf_model.eval()
    with torch.no_grad():
        outputs = hf_model(**inputs)
    hf_logits = outputs.logits.detach().numpy()

    # Original model inference
    original_model.trainable = False
    image_size = CONFIG_MAP[model_name]["image_size"]
    img = prepare_img().resize((image_size, image_size), resample=PIL.Image.NEAREST)
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    original_logits = original_model.predict(x)

    # Check whether original and HF model outputs match -> np.allclose
    assert np.allclose(original_logits, hf_logits, atol=1e-3), "The predicted logits are not the same."
    print("Model outputs match!")

    if save_model:
        # Create folder to save model
        if not os.path.isdir(pytorch_dump_folder_path):
            os.mkdir(pytorch_dump_folder_path)
        # Save converted model and image processor
        hf_model.save_pretrained(pytorch_dump_folder_path)
        preprocessor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Push model and image processor to hub
        print(f"Pushing converted {model_name} to the hub...")
        model_name = f"efficientnet-{model_name}"
        preprocessor.push_to_hub(model_name)
        hf_model.push_to_hub(model_name)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="b0",
        type=str,
        help="Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default="hf_model",
        type=str,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument("--save_model", action="store_true", help="Save model to local")
    parser.add_argument("--push_to_hub", action="store_true", help="Push model and image processor to the hub")

    args = parser.parse_args()
    convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
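A possible invocation, assuming the script is saved as convert_efficientnet_to_pytorch.py (the file name is an assumption; the flags are the ones defined above):

python convert_efficientnet_to_pytorch.py --model_name b0 --pytorch_dump_folder_path hf_model --save_model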
import unittest

import numpy as np
import torch

from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad


class SummarizationDataProcessingTest(unittest.TestCase):
    def setUp(self):
        self.block_size = 10

    def test_fit_to_block_sequence_too_small(self):
        """Pad the sequence with 0 if the sequence is smaller than the block size."""
        sequence = [1, 2, 3, 4]
        expected_output = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_fit_to_block_sequence_fit_exactly(self):
        """Do nothing if the sequence is the right size."""
        sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_fit_to_block_sequence_too_big(self):
        """Truncate the sequence if it is too long."""
        sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
        expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_process_story_no_highlights(self):
        """Processing a story with no highlights returns an empty list for the summary."""
        raw_story = """It was the year of Our Lord one thousand seven hundred and
        seventy-five.\n\nSpiritual revelations were conceded to England at that
        favoured period, as at this."""
        _, summary_lines = process_story(raw_story)
        self.assertEqual(summary_lines, [])

    def test_process_empty_story(self):
        """An empty story returns an empty collection of lines."""
        raw_story = ""
        story_lines, summary_lines = process_story(raw_story)
        self.assertEqual(story_lines, [])
        self.assertEqual(summary_lines, [])

    def test_process_story_with_missing_period(self):
        raw_story = (
            "It was the year of Our Lord one thousand seven hundred and "
            "seventy-five\n\nSpiritual revelations were conceded to England "
            "at that favoured period, as at this.\n@highlight\n\nIt was the best of times"
        )
        story_lines, summary_lines = process_story(raw_story)

        expected_story_lines = [
            "It was the year of Our Lord one thousand seven hundred and seventy-five.",
            "Spiritual revelations were conceded to England at that favoured period, as at this.",
        ]
        self.assertEqual(expected_story_lines, story_lines)

        expected_summary_lines = ["It was the best of times."]
        self.assertEqual(expected_summary_lines, summary_lines)

    def test_build_mask_no_padding(self):
        sequence = torch.tensor([1, 2, 3, 4])
        expected = torch.tensor([1, 1, 1, 1])
        np.testing.assert_array_equal(build_mask(sequence, 0).numpy(), expected.numpy())

    def test_build_mask(self):
        sequence = torch.tensor([1, 2, 3, 4, 23, 23, 23])
        expected = torch.tensor([1, 1, 1, 1, 0, 0, 0])
        np.testing.assert_array_equal(build_mask(sequence, 23).numpy(), expected.numpy())

    def test_build_mask_with_padding_equal_to_one(self):
        sequence = torch.tensor([8, 2, 3, 4, 1, 1, 1])
        expected = torch.tensor([1, 1, 1, 1, 0, 0, 0])
        np.testing.assert_array_equal(build_mask(sequence, 1).numpy(), expected.numpy())

    def test_compute_token_type_ids(self):
        separator = 101
        batch = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]])
        expected = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]])

        result = compute_token_type_ids(batch, separator)
        np.testing.assert_array_equal(result, expected)
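If this module is saved next to utils_summarization.py, the tests can be run with pytest; the test file name below is an assumption, not given in the source:

python -m pytest test_utils_summarization.py -v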
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

_import_structure = {"configuration_focalnet": ["FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FocalNetConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_focalnet"] = [
        "FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FocalNetForImageClassification",
        "FocalNetForMaskedImageModeling",
        "FocalNetBackbone",
        "FocalNetModel",
        "FocalNetPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_focalnet import (
            FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            FocalNetBackbone,
            FocalNetForImageClassification,
            FocalNetForMaskedImageModeling,
            FocalNetModel,
            FocalNetPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
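A brief usage sketch: with the lazy structure above, the heavy modeling module (and therefore torch) is only imported when one of its symbols is first accessed; the class names below are the ones this file declares:

from transformers import FocalNetConfig, FocalNetModel  # triggers the lazy import on access

config = FocalNetConfig()
model = FocalNetModel(config)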
from __future__ import annotations


def longest_subsequence(array: list[int]) -> list[int]:  # This function is recursive
    array_length = len(array)
    # If the array contains only one element, we return it (it's the stop condition of
    # recursion)
    if array_length <= 1:
        return array
    # Else
    pivot = array[0]
    is_found = False
    i = 1
    longest_subseq: list[int] = []
    while not is_found and i < array_length:
        if array[i] < pivot:
            is_found = True
            temp_array = [element for element in array[i:] if element >= array[i]]
            temp_array = longest_subsequence(temp_array)
            if len(temp_array) > len(longest_subseq):
                longest_subseq = temp_array
        else:
            i += 1

    temp_array = [element for element in array[1:] if element >= pivot]
    temp_array = [pivot, *longest_subsequence(temp_array)]
    if len(temp_array) > len(longest_subseq):
        return temp_array
    else:
        return longest_subseq


if __name__ == "__main__":
    import doctest

    doctest.testmod()
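An illustrative call (the expected output is easy to verify by hand):

print(longest_subsequence([10, 22, 9, 33, 21, 50, 41, 60, 80]))  # [10, 22, 33, 41, 60, 80]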
from typing import Any, Dict, List, Union

from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline

if is_vision_available():
    from ..image_utils import load_image

if is_torch_available():
    import torch

    from ..models.auto.modeling_auto import MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING

logger = logging.get_logger(__name__)


Prediction = Dict[str, Any]
Predictions = List[Prediction]


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ObjectDetectionPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        if self.framework == "tf":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")

        requires_backends(self, "vision")
        self.check_model_type(
            dict(MODEL_FOR_OBJECT_DETECTION_MAPPING.items() + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items())
        )

    def _sanitize_parameters(self, **kwargs):
        postprocess_kwargs = {}
        if "threshold" in kwargs:
            postprocess_kwargs["threshold"] = kwargs["threshold"]
        return {}, {}, postprocess_kwargs

    def __call__(self, *args, **kwargs) -> Union[Predictions, List[Prediction]]:
        return super().__call__(*args, **kwargs)

    def preprocess(self, image):
        image = load_image(image)
        target_size = torch.IntTensor([[image.height, image.width]])
        inputs = self.image_processor(images=[image], return_tensors="pt")
        if self.tokenizer is not None:
            inputs = self.tokenizer(text=inputs["words"], boxes=inputs["boxes"], return_tensors="pt")
        inputs["target_size"] = target_size
        return inputs

    def _forward(self, model_inputs):
        target_size = model_inputs.pop("target_size")
        outputs = self.model(**model_inputs)
        model_outputs = outputs.__class__({"target_size": target_size, **outputs})
        if self.tokenizer is not None:
            model_outputs["bbox"] = model_inputs["bbox"]
        return model_outputs

    def postprocess(self, model_outputs, threshold=0.9):
        target_size = model_outputs["target_size"]
        if self.tokenizer is not None:
            # This is a LayoutLMForTokenClassification variant.
            # The OCR got the boxes and the model classified the words.
            height, width = target_size[0].tolist()

            def unnormalize(bbox):
                return self._get_bounding_box(
                    torch.Tensor(
                        [
                            (width * bbox[0] / 1000),
                            (height * bbox[1] / 1000),
                            (width * bbox[2] / 1000),
                            (height * bbox[3] / 1000),
                        ]
                    )
                )

            scores, classes = model_outputs["logits"].squeeze(0).softmax(dim=-1).max(dim=-1)
            labels = [self.model.config.id2label[prediction] for prediction in classes.tolist()]
            boxes = [unnormalize(bbox) for bbox in model_outputs["bbox"].squeeze(0)]
            keys = ["score", "label", "box"]
            annotation = [dict(zip(keys, vals)) for vals in zip(scores.tolist(), labels, boxes) if vals[0] > threshold]
        else:
            # This is a regular ForObjectDetectionModel
            raw_annotations = self.image_processor.post_process_object_detection(model_outputs, threshold, target_size)
            raw_annotation = raw_annotations[0]
            scores = raw_annotation["scores"]
            labels = raw_annotation["labels"]
            boxes = raw_annotation["boxes"]

            raw_annotation["scores"] = scores.tolist()
            raw_annotation["labels"] = [self.model.config.id2label[label.item()] for label in labels]
            raw_annotation["boxes"] = [self._get_bounding_box(box) for box in boxes]

            # {"scores": [...], ...} --> [{"score":x, ...}, ...]
            keys = ["score", "label", "box"]
            annotation = [
                dict(zip(keys, vals))
                for vals in zip(raw_annotation["scores"], raw_annotation["labels"], raw_annotation["boxes"])
            ]

        return annotation

    def _get_bounding_box(self, box: "torch.Tensor") -> Dict[str, int]:
        if self.framework != "pt":
            raise ValueError("The ObjectDetectionPipeline is only available in PyTorch.")
        xmin, ymin, xmax, ymax = box.int().tolist()
        bbox = {
            "xmin": xmin,
            "ymin": ymin,
            "xmax": xmax,
            "ymax": ymax,
        }
        return bbox
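A short usage sketch for this pipeline; the model identifier is illustrative:

from transformers import pipeline

detector = pipeline("object-detection", model="facebook/detr-resnet-50")
predictions = detector("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=0.9)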
"""simple docstring"""
import json
import os
from functools import lru_cache
from typing import TYPE_CHECKING, List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
UpperCAmelCase__ : Tuple = logging.get_logger(__name__)
UpperCAmelCase__ : List[Any] = {
'vocab_file': 'vocab.json',
'merges_file': 'merges.txt',
'tokenizer_config_file': 'tokenizer_config.json',
}
UpperCAmelCase__ : List[str] = {
'vocab_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'},
'merges_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'},
'tokenizer_config_file': {
'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json'
},
}
UpperCAmelCase__ : str = {'facebook/blenderbot-3B': 1_2_8}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def lowercase_ ( ):
SCREAMING_SNAKE_CASE__ : Dict = (
list(range(ord("""!""" ) ,ord("""~""" ) + 1 ) ) + list(range(ord("""¡""" ) ,ord("""¬""" ) + 1 ) ) + list(range(ord("""®""" ) ,ord("""ÿ""" ) + 1 ) )
)
SCREAMING_SNAKE_CASE__ : List[Any] = bs[:]
SCREAMING_SNAKE_CASE__ : Optional[int] = 0
for b in range(2**8 ):
if b not in bs:
bs.append(_snake_case )
cs.append(2**8 + n )
n += 1
SCREAMING_SNAKE_CASE__ : List[Any] = [chr(_snake_case ) for n in cs]
return dict(zip(_snake_case ,_snake_case ) )
def lowercase_ ( _snake_case ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = set()
SCREAMING_SNAKE_CASE__ : Optional[Any] = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
SCREAMING_SNAKE_CASE__ : Tuple = char
return pairs
class lowerCAmelCase_ (a__ ):
"""simple docstring"""
__UpperCamelCase : Dict = VOCAB_FILES_NAMES
__UpperCamelCase : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase : Tuple = ['''input_ids''', '''attention_mask''']
def __init__(self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__="replace" , SCREAMING_SNAKE_CASE__="<s>" , SCREAMING_SNAKE_CASE__="</s>" , SCREAMING_SNAKE_CASE__="</s>" , SCREAMING_SNAKE_CASE__="<s>" , SCREAMING_SNAKE_CASE__="<unk>" , SCREAMING_SNAKE_CASE__="<pad>" , SCREAMING_SNAKE_CASE__="<mask>" , SCREAMING_SNAKE_CASE__=False , **SCREAMING_SNAKE_CASE__ , ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = AddedToken(SCREAMING_SNAKE_CASE__ , lstrip=SCREAMING_SNAKE_CASE__ , rstrip=SCREAMING_SNAKE_CASE__ ) if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else bos_token
SCREAMING_SNAKE_CASE__ : Dict = AddedToken(SCREAMING_SNAKE_CASE__ , lstrip=SCREAMING_SNAKE_CASE__ , rstrip=SCREAMING_SNAKE_CASE__ ) if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else eos_token
SCREAMING_SNAKE_CASE__ : str = AddedToken(SCREAMING_SNAKE_CASE__ , lstrip=SCREAMING_SNAKE_CASE__ , rstrip=SCREAMING_SNAKE_CASE__ ) if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else sep_token
SCREAMING_SNAKE_CASE__ : Any = AddedToken(SCREAMING_SNAKE_CASE__ , lstrip=SCREAMING_SNAKE_CASE__ , rstrip=SCREAMING_SNAKE_CASE__ ) if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else cls_token
SCREAMING_SNAKE_CASE__ : Union[str, Any] = AddedToken(SCREAMING_SNAKE_CASE__ , lstrip=SCREAMING_SNAKE_CASE__ , rstrip=SCREAMING_SNAKE_CASE__ ) if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else unk_token
SCREAMING_SNAKE_CASE__ : Any = AddedToken(SCREAMING_SNAKE_CASE__ , lstrip=SCREAMING_SNAKE_CASE__ , rstrip=SCREAMING_SNAKE_CASE__ ) if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
SCREAMING_SNAKE_CASE__ : int = AddedToken(SCREAMING_SNAKE_CASE__ , lstrip=SCREAMING_SNAKE_CASE__ , rstrip=SCREAMING_SNAKE_CASE__ ) if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else mask_token
super().__init__(
errors=SCREAMING_SNAKE_CASE__ , bos_token=SCREAMING_SNAKE_CASE__ , eos_token=SCREAMING_SNAKE_CASE__ , unk_token=SCREAMING_SNAKE_CASE__ , sep_token=SCREAMING_SNAKE_CASE__ , cls_token=SCREAMING_SNAKE_CASE__ , pad_token=SCREAMING_SNAKE_CASE__ , mask_token=SCREAMING_SNAKE_CASE__ , add_prefix_space=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , )
with open(SCREAMING_SNAKE_CASE__ , encoding="""utf-8""" ) as vocab_handle:
SCREAMING_SNAKE_CASE__ : int = json.load(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Dict = {v: k for k, v in self.encoder.items()}
SCREAMING_SNAKE_CASE__ : List[Any] = errors # how to handle errors in decoding
SCREAMING_SNAKE_CASE__ : List[str] = bytes_to_unicode()
SCREAMING_SNAKE_CASE__ : List[str] = {v: k for k, v in self.byte_encoder.items()}
with open(SCREAMING_SNAKE_CASE__ , encoding="""utf-8""" ) as merges_handle:
SCREAMING_SNAKE_CASE__ : Dict = merges_handle.read().split("""\n""" )[1:-1]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [tuple(merge.split() ) for merge in bpe_merges]
SCREAMING_SNAKE_CASE__ : Optional[Any] = dict(zip(SCREAMING_SNAKE_CASE__ , range(len(SCREAMING_SNAKE_CASE__ ) ) ) )
SCREAMING_SNAKE_CASE__ : Optional[int] = {}
SCREAMING_SNAKE_CASE__ : List[str] = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
SCREAMING_SNAKE_CASE__ : Tuple = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""" )
@property
# Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot
def __magic_name__ (self ) -> int:
"""simple docstring"""
return len(self.encoder )
def __magic_name__ (self ) -> int:
"""simple docstring"""
return dict(self.encoder , **self.added_tokens_encoder )
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ ) -> Tuple:
"""simple docstring"""
if token in self.cache:
return self.cache[token]
SCREAMING_SNAKE_CASE__ : int = tuple(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = get_pairs(SCREAMING_SNAKE_CASE__ )
if not pairs:
return token
while True:
SCREAMING_SNAKE_CASE__ : Optional[int] = min(SCREAMING_SNAKE_CASE__ , key=lambda SCREAMING_SNAKE_CASE__ : self.bpe_ranks.get(SCREAMING_SNAKE_CASE__ , float("""inf""" ) ) )
if bigram not in self.bpe_ranks:
break
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[Any] = bigram
SCREAMING_SNAKE_CASE__ : Any = []
SCREAMING_SNAKE_CASE__ : Optional[Any] = 0
while i < len(SCREAMING_SNAKE_CASE__ ):
try:
SCREAMING_SNAKE_CASE__ : Tuple = word.index(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
SCREAMING_SNAKE_CASE__ : Dict = j
if word[i] == first and i < len(SCREAMING_SNAKE_CASE__ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
SCREAMING_SNAKE_CASE__ : str = tuple(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : List[Any] = new_word
if len(SCREAMING_SNAKE_CASE__ ) == 1:
break
else:
SCREAMING_SNAKE_CASE__ : Tuple = get_pairs(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : List[str] = """ """.join(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = word
return word
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = []
for token in re.findall(self.pat , SCREAMING_SNAKE_CASE__ ):
SCREAMING_SNAKE_CASE__ : List[Any] = """""".join(
self.byte_encoder[b] for b in token.encode("""utf-8""" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(SCREAMING_SNAKE_CASE__ ).split(""" """ ) )
return bpe_tokens
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ ) -> Optional[int]:
"""simple docstring"""
return self.encoder.get(SCREAMING_SNAKE_CASE__ , self.encoder.get(self.unk_token ) )
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ ) -> str:
"""simple docstring"""
return self.decoder.get(SCREAMING_SNAKE_CASE__ )
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = """""".join(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = bytearray([self.byte_decoder[c] for c in text] ).decode("""utf-8""" , errors=self.errors )
return text
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None ) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(SCREAMING_SNAKE_CASE__ ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
SCREAMING_SNAKE_CASE__ : List[Any] = os.path.join(
SCREAMING_SNAKE_CASE__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
SCREAMING_SNAKE_CASE__ : Optional[int] = os.path.join(
SCREAMING_SNAKE_CASE__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] )
with open(SCREAMING_SNAKE_CASE__ , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=SCREAMING_SNAKE_CASE__ , ensure_ascii=SCREAMING_SNAKE_CASE__ ) + """\n""" )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = 0
with open(SCREAMING_SNAKE_CASE__ , """w""" , encoding="""utf-8""" ) as writer:
writer.write("""#version: 0.2\n""" )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda SCREAMING_SNAKE_CASE__ : kv[1] ):
if index != token_index:
logger.warning(
F'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
""" Please check that the tokenizer is not corrupted!""" )
SCREAMING_SNAKE_CASE__ : str = token_index
writer.write(""" """.join(SCREAMING_SNAKE_CASE__ ) + """\n""" )
index += 1
return vocab_file, merge_file
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = False ) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=SCREAMING_SNAKE_CASE__ , token_ids_a=SCREAMING_SNAKE_CASE__ , already_has_special_tokens=SCREAMING_SNAKE_CASE__ )
if token_ids_a is None:
return [1] + ([0] * len(SCREAMING_SNAKE_CASE__ )) + [1]
return [1] + ([0] * len(SCREAMING_SNAKE_CASE__ )) + [1, 1] + ([0] * len(SCREAMING_SNAKE_CASE__ )) + [1]
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None ) -> List[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = [self.sep_token_id]
SCREAMING_SNAKE_CASE__ : List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=False , **SCREAMING_SNAKE_CASE__ ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = kwargs.pop("""add_prefix_space""" , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(SCREAMING_SNAKE_CASE__ ) > 0 and not text[0].isspace()):
SCREAMING_SNAKE_CASE__ : List[Any] = """ """ + text
return (text, kwargs)
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None ) -> int:
"""simple docstring"""
return token_ids_a + [self.eos_token_id]
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ ) -> List[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = []
for is_user, text in conversation.iter_texts():
if is_user:
# We need to space prefix as it's being done within blenderbot
inputs.append(""" """ + text )
else:
# Generated responses should contain them already.
inputs.append(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : List[str] = """ """.join(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Dict = self.encode(SCREAMING_SNAKE_CASE__ )
if len(SCREAMING_SNAKE_CASE__ ) > self.model_max_length:
SCREAMING_SNAKE_CASE__ : Any = input_ids[-self.model_max_length :]
logger.warning(F'''Trimmed input from conversation as it was longer than {self.model_max_length} tokens.''' )
return input_ids
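A minimal usage sketch; the checkpoint name comes from this file's own pretrained map:

tokenizer = BlenderbotTokenizer.from_pretrained("facebook/blenderbot-3B")
print(tokenizer("Hello world")["input_ids"])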
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging

logger = logging.get_logger(__name__)

CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "YituTech/conv-bert-base": "https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json",
    "YituTech/conv-bert-medium-small": (
        "https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json"
    ),
    "YituTech/conv-bert-small": "https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json",
    # See all ConvBERT models at https://huggingface.co/models?filter=convbert
}


class ConvBertConfig(PretrainedConfig):
    model_type = "convbert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        embedding_size=768,
        head_ratio=2,
        conv_kernel_size=9,
        num_groups=1,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.embedding_size = embedding_size
        self.head_ratio = head_ratio
        self.conv_kernel_size = conv_kernel_size
        self.num_groups = num_groups
        self.classifier_dropout = classifier_dropout


class ConvBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
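A usage sketch for the configuration above; ConvBertModel is assumed to be importable from transformers alongside this config:

from transformers import ConvBertConfig, ConvBertModel

configuration = ConvBertConfig()
model = ConvBertModel(configuration)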
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
_snake_case = "2.13.1"
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse("3.7"):
raise ImportWarning(
"To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition."
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
"To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n"
"If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`."
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
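
# A minimal usage sketch (illustrative only; "imdb" is just an example dataset name):
#
#   from datasets import load_dataset
#   ds = load_dataset("imdb", split="train")
#   print(ds[0])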
| 26 |
'''simple docstring'''
from __future__ import annotations
def check_polygon(nums: list[float]) -> bool:
    if len(nums) < 2:
        raise ValueError("Monogons and Digons are not polygons in the Euclidean space")
    if any(i <= 0 for i in nums):
        raise ValueError("All values must be greater than 0")
    copy_nums = nums.copy()
    copy_nums.sort()
    return copy_nums[-1] < sum(copy_nums[:-1])
if __name__ == "__main__":
import doctest
doctest.testmod()
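
    # Illustrative checks: a valid triangle versus a degenerate set of sides where
    # the longest side is not shorter than the sum of the rest.
    print(check_polygon([6, 10, 5]))  # True
    print(check_polygon([3, 7, 13, 2]))  # False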
| 319 | 0 |
'''simple docstring'''
import argparse
import torch
from transformers import (
UniSpeechSatConfig,
UniSpeechSatForAudioFrameClassification,
UniSpeechSatForSequenceClassification,
UniSpeechSatForXVector,
    Wav2Vec2FeatureExtractor,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def convert_classification(base_model_name, hf_config, downstream_dict):
    model = UniSpeechSatForSequenceClassification.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["projector.weight"]
    model.projector.bias.data = downstream_dict["projector.bias"]
    model.classifier.weight.data = downstream_dict["model.post_net.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.post_net.linear.bias"]
    return model


def convert_diarization(base_model_name, hf_config, downstream_dict):
    model = UniSpeechSatForAudioFrameClassification.from_pretrained(base_model_name, config=hf_config)
    model.classifier.weight.data = downstream_dict["model.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.linear.bias"]
    return model


def convert_xvector(base_model_name, hf_config, downstream_dict):
    model = UniSpeechSatForXVector.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["connector.weight"]
    model.projector.bias.data = downstream_dict["connector.bias"]
    for i, kernel_size in enumerate(hf_config.tdnn_kernel):
        model.tdnn[i].kernel.weight.data = downstream_dict[
            f"model.framelevel_feature_extractor.module.{i}.kernel.weight"
        ]
        model.tdnn[i].kernel.bias.data = downstream_dict[f"model.framelevel_feature_extractor.module.{i}.kernel.bias"]

    model.feature_extractor.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"]
    model.feature_extractor.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"]
    model.classifier.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"]
    model.classifier.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"]
    model.objective.weight.data = downstream_dict["objective.W"]
    return model


@torch.no_grad()
def convert_saprl_checkpoint(base_model_name, config_path, checkpoint_path, model_dump_path):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    downstream_dict = checkpoint["Downstream"]

    hf_config = UniSpeechSatConfig.from_pretrained(config_path)
    hf_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
        base_model_name, return_attention_mask=True, do_normalize=False
    )

    arch = hf_config.architectures[0]
    if arch.endswith("ForSequenceClassification"):
        hf_model = convert_classification(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForAudioFrameClassification"):
        hf_model = convert_diarization(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForXVector"):
        hf_model = convert_xvector(base_model_name, hf_config, downstream_dict)
    else:
        raise NotImplementedError(f"S3PRL weights conversion is not supported for {arch}")

    if hf_config.use_weighted_layer_sum:
        hf_model.layer_weights.data = checkpoint["Featurizer"]["weights"]

    hf_feature_extractor.save_pretrained(model_dump_path)
    hf_model.save_pretrained(model_dump_path)
if __name__ == "__main__":
__lowercase : int = argparse.ArgumentParser()
parser.add_argument(
'--base_model_name', default=None, type=str, help='Name of the huggingface pretrained base model.'
)
parser.add_argument('--config_path', default=None, type=str, help='Path to the huggingface classifier config.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to the s3prl checkpoint.')
parser.add_argument('--model_dump_path', default=None, type=str, help='Path to the final converted model.')
__lowercase : Optional[int] = parser.parse_args()
convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
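
# Example invocation (hypothetical script name and local paths, for illustration):
#
#   python convert_unispeech_sat_s3prl_checkpoint.py \
#       --base_model_name microsoft/unispeech-sat-base \
#       --config_path ./config.json \
#       --checkpoint_path ./s3prl_checkpoint.ckpt \
#       --model_dump_path ./converted_model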
| 27 |
'''simple docstring'''
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(__file__), "src"))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='''ignore''', category=FutureWarning)
def pytest_configure(config):
config.addinivalue_line(
'''markers''' , '''is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested''' )
config.addinivalue_line(
'''markers''' , '''is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested''' )
config.addinivalue_line('''markers''' , '''is_pipeline_test: mark test to run only when pipelines are tested''' )
config.addinivalue_line('''markers''' , '''is_staging_test: mark test to run only in the staging environment''' )
config.addinivalue_line('''markers''' , '''accelerate_tests: mark test that require accelerate''' )
config.addinivalue_line('''markers''' , '''tool_tests: mark the tool tests that are run on their specific schedule''' )
def pytest_addoption(parser):
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)
def pytest_terminal_summary(terminalreporter):
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
def pytest_sessionfinish(session, exitstatus):
    # If no tests are collected, pytest exists with code 5, which makes the CI fail.
    if exitstatus == 5:
        session.exitstatus = 0
# Doctest custom flag to ignore output.
IGNORE_RESULT = doctest.register_optionflag("IGNORE_RESULT")

OutputChecker = doctest.OutputChecker


class CustomOutputChecker(OutputChecker):
    def check_output(self, want, got, optionflags):
        if IGNORE_RESULT & optionflags:
            return True
        return OutputChecker.check_output(self, want, got, optionflags)


doctest.OutputChecker = CustomOutputChecker
_pytest.doctest.DoctestModule = HfDoctestModule
doctest.DocTestParser = HfDocTestParser
| 319 | 0 |
'''simple docstring'''
import argparse
import torch
from transformers import GPT2LMHeadModel, RobertaForMaskedLM
if __name__ == "__main__":
_lowerCamelCase : Tuple = argparse.ArgumentParser(
description=(
"Extraction some layers of the full RobertaForMaskedLM or GPT2LMHeadModel for Transfer Learned"
" Distillation"
)
)
parser.add_argument("--model_type", default="roberta", choices=["roberta", "gpt2"])
parser.add_argument("--model_name", default="roberta-large", type=str)
parser.add_argument("--dump_checkpoint", default="serialization_dir/tf_roberta_048131723.pth", type=str)
parser.add_argument("--vocab_transform", action="store_true")
_lowerCamelCase : Any = parser.parse_args()
if args.model_type == "roberta":
_lowerCamelCase : str = RobertaForMaskedLM.from_pretrained(args.model_name)
_lowerCamelCase : List[Any] = "roberta"
elif args.model_type == "gpt2":
_lowerCamelCase : List[Any] = GPTaLMHeadModel.from_pretrained(args.model_name)
_lowerCamelCase : List[Any] = "transformer"
_lowerCamelCase : Union[str, Any] = model.state_dict()
_lowerCamelCase : Optional[int] = {}
# Embeddings #
if args.model_type == "gpt2":
for param_name in ["wte.weight", "wpe.weight"]:
_lowerCamelCase : List[str] = state_dict[f'''{prefix}.{param_name}''']
else:
for w in ["word_embeddings", "position_embeddings", "token_type_embeddings"]:
_lowerCamelCase : Any = f'''{prefix}.embeddings.{w}.weight'''
_lowerCamelCase : int = state_dict[param_name]
for w in ["weight", "bias"]:
_lowerCamelCase : Tuple = f'''{prefix}.embeddings.LayerNorm.{w}'''
_lowerCamelCase : Any = state_dict[param_name]
# Transformer Blocks #
_lowerCamelCase : Union[str, Any] = 0
for teacher_idx in [0, 2, 4, 7, 9, 11]:
if args.model_type == "gpt2":
for layer in ["ln_1", "attn.c_attn", "attn.c_proj", "ln_2", "mlp.c_fc", "mlp.c_proj"]:
for w in ["weight", "bias"]:
_lowerCamelCase : int = state_dict[
f'''{prefix}.h.{teacher_idx}.{layer}.{w}'''
]
_lowerCamelCase : Tuple = state_dict[f'''{prefix}.h.{teacher_idx}.attn.bias''']
else:
for layer in [
"attention.self.query",
"attention.self.key",
"attention.self.value",
"attention.output.dense",
"attention.output.LayerNorm",
"intermediate.dense",
"output.dense",
"output.LayerNorm",
]:
for w in ["weight", "bias"]:
_lowerCamelCase : Tuple = state_dict[
f'''{prefix}.encoder.layer.{teacher_idx}.{layer}.{w}'''
]
std_idx += 1
# Language Modeling Head ###s
if args.model_type == "roberta":
for layer in ["lm_head.decoder.weight", "lm_head.bias"]:
_lowerCamelCase : Any = state_dict[f'''{layer}''']
if args.vocab_transform:
for w in ["weight", "bias"]:
_lowerCamelCase : Optional[Any] = state_dict[f'''lm_head.dense.{w}''']
_lowerCamelCase : Tuple = state_dict[f'''lm_head.layer_norm.{w}''']
elif args.model_type == "gpt2":
for w in ["weight", "bias"]:
_lowerCamelCase : str = state_dict[f'''{prefix}.ln_f.{w}''']
_lowerCamelCase : Any = state_dict["lm_head.weight"]
print(f'''N layers selected for distillation: {std_idx}''')
print(f'''Number of params transferred for distillation: {len(compressed_sd.keys())}''')
print(f'''Save transferred checkpoint to {args.dump_checkpoint}.''')
torch.save(compressed_sd, args.dump_checkpoint)
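
# Example invocation (illustrative; mirrors the argparse defaults above, script name assumed):
#
#   python extract.py --model_type roberta --model_name roberta-large \
#       --dump_checkpoint serialization_dir/tf_roberta_048131723.pth --vocab_transform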
| 28 |
'''simple docstring'''
import heapq
import sys
import numpy as np
TPos = tuple[int, int]
class PriorityQueue:
    def __init__(self):
        self.elements = []
        self.set = set()

    def minkey(self):
        if not self.empty():
            return self.elements[0][0]
        else:
            return float("inf")

    def empty(self):
        return len(self.elements) == 0

    def put(self, item, priority):
        if item not in self.set:
            heapq.heappush(self.elements, (priority, item))
            self.set.add(item)
        else:
            # update
            # print("update", item)
            temp = []
            (pri, x) = heapq.heappop(self.elements)
            while x != item:
                temp.append((pri, x))
                (pri, x) = heapq.heappop(self.elements)
            temp.append((priority, item))
            for (pro, xxx) in temp:
                heapq.heappush(self.elements, (pro, xxx))

    def remove_element(self, item):
        if item in self.set:
            self.set.remove(item)
            temp = []
            (pro, x) = heapq.heappop(self.elements)
            while x != item:
                temp.append((pro, x))
                (pro, x) = heapq.heappop(self.elements)
            for (prito, yyy) in temp:
                heapq.heappush(self.elements, (prito, yyy))

    def top_show(self):
        return self.elements[0][1]

    def get(self):
        (priority, item) = heapq.heappop(self.elements)
        self.set.remove(item)
        return (priority, item)
def consistent_heuristic(p: TPos, goal: TPos):
    # euclidean distance
    a = np.array(p)
    b = np.array(goal)
    return np.linalg.norm(a - b)


def heuristic_2(p: TPos, goal: TPos):
    # integer division by time variable
    return consistent_heuristic(p, goal) // t


def heuristic_1(p: TPos, goal: TPos):
    # manhattan distance
    return abs(p[0] - goal[0]) + abs(p[1] - goal[1])


def key(start: TPos, i: int, goal: TPos, g_function: dict):
    ans = g_function[start] + W1 * heuristics[i](start, goal)
    return ans
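
# key() computes the weighted-A* priority f(s) = g(s) + W1 * h_i(s, goal), where
# h_0 is the consistent (euclidean) anchor heuristic and h_1, h_2 are the possibly
# inadmissible ones; e.g. with g_function[s] = 4, W1 = 1 and euclidean distance 5.0
# to the goal, key(s, 0, goal, g_function) == 9.0.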
def do_something(back_pointer, goal, start):
    grid = np.chararray((n, n))
    for i in range(n):
        for j in range(n):
            grid[i][j] = "*"

    for i in range(n):
        for j in range(n):
            if (j, (n - 1) - i) in blocks:
                grid[i][j] = "#"

    grid[0][(n - 1)] = "-"
    x = back_pointer[goal]
    while x != start:
        (x_c, y_c) = x
        # print(x)
        grid[(n - 1) - y_c][x_c] = "-"
        x = back_pointer[x]
    grid[(n - 1)][0] = "-"

    for i in range(n):
        for j in range(n):
            if (i, j) == (0, n - 1):
                print(grid[i][j], end=" ")
                print("<-- End position", end=" ")
            else:
                print(grid[i][j], end=" ")
        print()
    print("^")
    print("Start position")
    print()
    print("# is an obstacle")
    print("- is the path taken by algorithm")
    print("PATH TAKEN BY THE ALGORITHM IS:-")
    x = back_pointer[goal]
    while x != start:
        print(x, end=" ")
        x = back_pointer[x]
    print(x)
    sys.exit()


def valid(p: TPos):
    if p[0] < 0 or p[0] > n - 1:
        return False
    if p[1] < 0 or p[1] > n - 1:
        return False
    return True
def expand_state(
    s,
    j,
    visited,
    g_function,
    close_list_anchor,
    close_list_inad,
    open_list,
    back_pointer,
):
    for itera in range(n_heuristic):
        open_list[itera].remove_element(s)
    # print("s", s)
    # print("j", j)
    (x, y) = s
    left = (x - 1, y)
    right = (x + 1, y)
    up = (x, y + 1)
    down = (x, y - 1)

    for neighbours in [left, right, up, down]:
        if neighbours not in blocks:
            if valid(neighbours) and neighbours not in visited:
                # print("neighbour", neighbours)
                visited.add(neighbours)
                back_pointer[neighbours] = -1
                g_function[neighbours] = float("inf")

            if valid(neighbours) and g_function[neighbours] > g_function[s] + 1:
                g_function[neighbours] = g_function[s] + 1
                back_pointer[neighbours] = s
                if neighbours not in close_list_anchor:
                    open_list[0].put(neighbours, key(neighbours, 0, goal, g_function))
                    if neighbours not in close_list_inad:
                        for var in range(1, n_heuristic):
                            if key(neighbours, var, goal, g_function) <= W2 * key(
                                neighbours, 0, goal, g_function
                            ):
                                open_list[j].put(
                                    neighbours, key(neighbours, var, goal, g_function)
                                )
def make_common_ground():
    some_list = []
for x in range(1 , 5 ):
for y in range(1 , 6 ):
some_list.append((x, y) )
for x in range(1_5 , 2_0 ):
some_list.append((x, 1_7) )
for x in range(1_0 , 1_9 ):
for y in range(1 , 1_5 ):
some_list.append((x, y) )
# L block
for x in range(1 , 4 ):
for y in range(1_2 , 1_9 ):
some_list.append((x, y) )
for x in range(3 , 1_3 ):
for y in range(1_6 , 1_9 ):
some_list.append((x, y) )
return some_list
heuristics = {0: consistent_heuristic, 1: heuristic_1, 2: heuristic_2}

blocks_blk = [
    (0, 1),
    (1, 1),
    (2, 1),
    (3, 1),
    (4, 1),
    (5, 1),
    (6, 1),
    (7, 1),
    (8, 1),
    (9, 1),
    (10, 1),
    (11, 1),
    (12, 1),
    (13, 1),
    (14, 1),
    (15, 1),
    (16, 1),
    (17, 1),
    (18, 1),
    (19, 1),
]
blocks_all = make_common_ground()

blocks = blocks_blk
# hyper parameters
W1 = 1
W2 = 1
n = 20
n_heuristic = 3  # one consistent and two other inconsistent

# start and end destination
start = (0, 0)
goal = (n - 1, n - 1)

t = 1
def multi_a_star(start: TPos, goal: TPos, n_heuristic: int):
    g_function = {start: 0, goal: float("inf")}
    back_pointer = {start: -1, goal: -1}
    open_list = []
    visited = set()

    for i in range(n_heuristic):
        open_list.append(PriorityQueue())
        open_list[i].put(start, key(start, i, goal, g_function))

    close_list_anchor: list[int] = []
    close_list_inad: list[int] = []
    while open_list[0].minkey() < float("inf"):
        for i in range(1, n_heuristic):
            # print(open_list[0].minkey(), open_list[i].minkey())
            if open_list[i].minkey() <= W2 * open_list[0].minkey():
                global t
                t += 1
                if g_function[goal] <= open_list[i].minkey():
                    if g_function[goal] < float("inf"):
                        do_something(back_pointer, goal, start)
                else:
                    get_s = open_list[i].top_show()
                    visited.add(get_s)
                    expand_state(
                        get_s,
                        i,
                        visited,
                        g_function,
                        close_list_anchor,
                        close_list_inad,
                        open_list,
                        back_pointer,
                    )
                    close_list_inad.append(get_s)
            else:
                if g_function[goal] <= open_list[0].minkey():
                    if g_function[goal] < float("inf"):
                        do_something(back_pointer, goal, start)
                    else:
                        get_s = open_list[0].top_show()
                        visited.add(get_s)
                        expand_state(
                            get_s,
                            0,
                            visited,
                            g_function,
                            close_list_anchor,
                            close_list_inad,
                            open_list,
                            back_pointer,
                        )
                        close_list_anchor.append(get_s)
    print("No path found to goal")
    print()
    for i in range(n - 1, -1, -1):
        for j in range(n):
            if (j, i) in blocks:
                print("#", end=" ")
            elif (j, i) in back_pointer:
                if (j, i) == (n - 1, n - 1):
                    print("*", end=" ")
                else:
                    print("-", end=" ")
            else:
                print("*", end=" ")
            if (j, i) == (n - 1, n - 1):
                print("<-- End position", end=" ")
        print()
    print("^")
    print("Start position")
    print()
    print("# is an obstacle")
    print("- is the path taken by algorithm")
if __name__ == "__main__":
multi_a_star(start, goal, n_heuristic)
| 319 | 0 |
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class DDIMParallelSchedulerTest(SchedulerCommonTest):
    '''simple docstring'''

    scheduler_classes = (DDIMParallelScheduler,)
    forward_default_kwargs = (("eta", 0.0), ("num_inference_steps", 50))

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "clip_sample": True,
        }

        config.update(**kwargs)
        return config

    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps, eta = 10, 0.0

        model = self.dummy_model()
        sample = self.dummy_sample_deter

        scheduler.set_timesteps(num_inference_steps)

        for t in scheduler.timesteps:
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample, eta).prev_sample

        return sample

    def test_timesteps(self):
        for timesteps in [100, 500, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_steps_offset(self):
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset)

        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1)
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(5)
        assert torch.equal(scheduler.timesteps, torch.LongTensor([801, 601, 401, 201, 1]))

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_timestep_spacing(self):
        for timestep_spacing in ["trailing", "leading"]:
            self.check_over_configs(timestep_spacing=timestep_spacing)

    def test_rescale_betas_zero_snr(self):
        for rescale_betas_zero_snr in [True, False]:
            self.check_over_configs(rescale_betas_zero_snr=rescale_betas_zero_snr)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True,
                    prediction_type=prediction_type,
                    sample_max_value=threshold,
                )

    def test_time_indices(self):
        for t in [1, 10, 49]:
            self.check_over_forward(time_step=t)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 10, 50], [10, 50, 500]):
            self.check_over_forward(time_step=t, num_inference_steps=num_inference_steps)

    def test_eta(self):
        for t, eta in zip([1, 10, 49], [0.0, 0.5, 1.0]):
            self.check_over_forward(time_step=t, eta=eta)

    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(420, 400) - 0.14771)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(980, 960) - 0.32460)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487, 486) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999, 998) - 0.02)) < 1e-5

    def test_batch_step_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps, eta = 10, 0.0
        scheduler.set_timesteps(num_inference_steps)

        model = self.dummy_model()
        sample1 = self.dummy_sample_deter
        sample2 = self.dummy_sample_deter + 0.1
        sample3 = self.dummy_sample_deter - 0.1

        per_sample_batch = sample1.shape[0]
        samples = torch.stack([sample1, sample2, sample3], dim=0)
        timesteps = torch.arange(num_inference_steps)[0:3, None].repeat(1, per_sample_batch)

        residual = model(samples.flatten(0, 1), timesteps.flatten(0, 1))
        pred_prev_sample = scheduler.batch_step_no_noise(residual, timesteps.flatten(0, 1), samples.flatten(0, 1), eta)

        result_sum = torch.sum(torch.abs(pred_prev_sample))
        result_mean = torch.mean(torch.abs(pred_prev_sample))

        assert abs(result_sum.item() - 1147.7904) < 1e-2
        assert abs(result_mean.item() - 0.4982) < 1e-3

    def test_full_loop_no_noise(self):
        sample = self.full_loop()

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 172.0067) < 1e-2
        assert abs(result_mean.item() - 0.223967) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 52.5302) < 1e-2
        assert abs(result_mean.item() - 0.0684) < 1e-3

    def test_full_loop_with_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 149.8295) < 1e-2
        assert abs(result_mean.item() - 0.1951) < 1e-3

    def test_full_loop_with_no_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 149.0784) < 1e-2
        assert abs(result_mean.item() - 0.1941) < 1e-3
| 29 |
'''simple docstring'''
def solution(numerator: int = 1, digit: int = 1000) -> int:
    the_digit = 1
    longest_list_length = 0

    for divide_by_number in range(numerator, digit + 1):
        has_been_divided: list[int] = []
        now_divide = numerator
        for _ in range(1, digit + 1):
            if now_divide in has_been_divided:
                if longest_list_length < len(has_been_divided):
                    longest_list_length = len(has_been_divided)
                    the_digit = divide_by_number
            else:
                has_been_divided.append(now_divide)
                now_divide = now_divide * 10 % divide_by_number

    return the_digit
# Tests
if __name__ == "__main__":
import doctest
doctest.testmod()
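
    # Illustrative call: among d <= 10, the fraction 1/7 has the longest recurring
    # cycle, so solution(1, 10) returns 7.
    print(f"solution(1, 10) = {solution(1, 10)}")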
| 319 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    'configuration_perceiver': ['PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PerceiverConfig', 'PerceiverOnnxConfig'],
    'tokenization_perceiver': ['PerceiverTokenizer'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_perceiver'] = ['PerceiverFeatureExtractor']
    _import_structure['image_processing_perceiver'] = ['PerceiverImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_perceiver'] = [
        'PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST',
        'PerceiverForImageClassificationConvProcessing',
        'PerceiverForImageClassificationFourier',
        'PerceiverForImageClassificationLearned',
        'PerceiverForMaskedLM',
        'PerceiverForMultimodalAutoencoding',
        'PerceiverForOpticalFlow',
        'PerceiverForSequenceClassification',
        'PerceiverLayer',
        'PerceiverModel',
        'PerceiverPreTrainedModel',
    ]
if TYPE_CHECKING:
from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig
from .tokenization_perceiver import PerceiverTokenizer
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_perceiver import PerceiverFeatureExtractor
from .image_processing_perceiver import PerceiverImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_perceiver import (
PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST,
PerceiverForImageClassificationConvProcessing,
PerceiverForImageClassificationFourier,
PerceiverForImageClassificationLearned,
PerceiverForMaskedLM,
PerceiverForMultimodalAutoencoding,
PerceiverForOpticalFlow,
PerceiverForSequenceClassification,
PerceiverLayer,
PerceiverModel,
PerceiverPreTrainedModel,
)
else:
import sys
__a = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 30 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
    "configuration_vision_encoder_decoder": ["VisionEncoderDecoderConfig", "VisionEncoderDecoderOnnxConfig"]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vision_encoder_decoder"] = ["VisionEncoderDecoderModel"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_vision_encoder_decoder"] = ["TFVisionEncoderDecoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_vision_encoder_decoder"] = ["FlaxVisionEncoderDecoderModel"]
if TYPE_CHECKING:
from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
import sys
UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 319 | 0 |
'''simple docstring'''
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def pi_estimator(iterations: int):
    """simple docstring"""

    def is_in_circle(x: float, y: float) -> bool:
        distance_from_centre = sqrt((x**2) + (y**2))
        # Our circle has a radius of 1, so a distance
        # greater than 1 would land outside the circle.
        return distance_from_centre <= 1

    # The proportion of guesses that landed in the circle
    proportion = mean(
        int(is_in_circle(uniform(-1.0, 1.0), uniform(-1.0, 1.0)))
        for _ in range(iterations)
    )
    # The ratio of the area for circle to square is pi/4.
    pi_estimate = proportion * 4
    print(f"The estimated value of pi is {pi_estimate}")
    print(f"The numpy value of pi is {pi}")
    print(f"The total error is {abs(pi - pi_estimate)}")


def area_under_curve_estimator(
    iterations: int,
    function_to_integrate: Callable[[float], float],
    min_value: float = 0.0,
    max_value: float = 1.0,
) -> float:
    """simple docstring"""
    return mean(
        function_to_integrate(uniform(min_value, max_value)) for _ in range(iterations)
    ) * (max_value - min_value)


def area_under_line_estimator_check(iterations: int, min_value: float = 0.0, max_value: float = 1.0) -> None:
    """simple docstring"""

    def identity_function(x: float) -> float:
        return x

    estimated_value = area_under_curve_estimator(iterations, identity_function, min_value, max_value)
    expected_value = (max_value * max_value - min_value * min_value) / 2

    print("******************")
    print(f"Estimating area under y=x where x varies from {min_value} to {max_value}")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {expected_value}")
    print(f"Total error is {abs(estimated_value - expected_value)}")
    print("******************")


def pi_estimator_using_area_under_curve(iterations: int) -> None:
    """simple docstring"""

    def function_to_integrate(x: float) -> float:
        return sqrt(4.0 - x * x)

    estimated_value = area_under_curve_estimator(iterations, function_to_integrate, 0.0, 2.0)

    print("******************")
    print("Estimating pi using area_under_curve_estimator")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {pi}")
    print(f"Total error is {abs(estimated_value - pi)}")
    print("******************")
if __name__ == "__main__":
import doctest
doctest.testmod()
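
    # Illustrative runs (Monte Carlo, so the printed estimates vary between invocations):
    pi_estimator(10_000)
    area_under_line_estimator_check(10_000)
    pi_estimator_using_area_under_curve(10_000)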
| 31 |
'''simple docstring'''
import fire
from utils import calculate_rouge, save_json
def calculate_rouge_path(pred_path, tgt_path, save_path=None, **kwargs):
    pred_lns = [x.strip() for x in open(pred_path).readlines()]
    tgt_lns = [x.strip() for x in open(tgt_path).readlines()][: len(pred_lns)]
    metrics = calculate_rouge(pred_lns, tgt_lns, **kwargs)
    if save_path is not None:
        save_json(metrics, save_path, indent=None)
    return metrics  # these print nicely
if __name__ == "__main__":
fire.Fire(calculate_rouge_path)
| 319 | 0 |
def validate_initial_digits(credit_card_number: str) -> bool:
    """simple docstring"""
    return credit_card_number.startswith(('34', '35', '37', '4', '5', '6'))


def luhn_validation(credit_card_number: str) -> bool:
    """simple docstring"""
    cc_number = credit_card_number
    total = 0
    half_len = len(cc_number) - 2
    for i in range(half_len, -1, -2):
        # double the value of every second digit
        digit = int(cc_number[i])
        digit *= 2
        # If doubling of a number results in a two digit number
        # i.e greater than 9(e.g., 6 × 2 = 12),
        # then add the digits of the product (e.g., 12: 1 + 2 = 3, 15: 1 + 5 = 6),
        # to get a single digit number.
        if digit > 9:
            digit %= 10
            digit += 1
        cc_number = cc_number[:i] + str(digit) + cc_number[i + 1 :]
        total += digit
    # Sum up the remaining digits
    for i in range(len(cc_number) - 1, -1, -2):
        total += int(cc_number[i])
    return total % 10 == 0


def validate_credit_card_number(credit_card_number: str) -> bool:
    """simple docstring"""
    error_message = F"""{credit_card_number} is an invalid credit card number because"""
    if not credit_card_number.isdigit():
        print(F"""{error_message} it has nonnumerical characters.""")
        return False

    if not 13 <= len(credit_card_number) <= 16:
        print(F"""{error_message} of its length.""")
        return False

    if not validate_initial_digits(credit_card_number):
        print(F"""{error_message} of its first two digits.""")
        return False

    if not luhn_validation(credit_card_number):
        print(F"""{error_message} it fails the Luhn check.""")
        return False

    print(F"""{credit_card_number} is a valid credit card number.""")
    return True
if __name__ == "__main__":
import doctest
doctest.testmod()
validate_credit_card_number('4111111111111111')
validate_credit_card_number('32323')
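
    # Worked Luhn example: in "4111111111111111" every second digit from the right is
    # doubled, turning seven 1s into 2s and the leading 4 into 8; together with the
    # eight untouched 1s the digit total is 30, which is divisible by 10.
    print(luhn_validation('4111111111111111'))  # True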
| 32 |
'''simple docstring'''
def bubble_sort(list_data: list, length: int = 0) -> list:
    length = length or len(list_data)
    swapped = False
    for i in range(length - 1):
        if list_data[i] > list_data[i + 1]:
            list_data[i], list_data[i + 1] = list_data[i + 1], list_data[i]
            swapped = True

    return list_data if not swapped else bubble_sort(list_data, length - 1)
if __name__ == "__main__":
import doctest
doctest.testmod()
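
    # Illustrative run: each recursive pass bubbles the current maximum to the end.
    print(bubble_sort([4, 2, 7, 1]))  # [1, 2, 4, 7]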
| 319 | 0 |
"""simple docstring"""
import argparse
from transformers import (
TapasConfig,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasTokenizer,
load_tf_weights_in_tapas,
)
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(
    task, reset_position_index_per_cell, tf_checkpoint_path, tapas_config_file, pytorch_dump_path
):
    # Initialise PyTorch model.
    # If you want to convert a checkpoint that uses absolute position embeddings, make sure to set reset_position_index_per_cell of
    # TapasConfig to False.

    # initialize configuration from json file
    config = TapasConfig.from_json_file(tapas_config_file)
    # set absolute/relative position embeddings parameter
    config.reset_position_index_per_cell = reset_position_index_per_cell

    # set remaining parameters of TapasConfig as well as the model based on the task
    if task == "SQA":
        model = TapasForQuestionAnswering(config=config)
    elif task == "WTQ":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = True
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 0.664694
        config.cell_selection_preference = 0.207951
        config.huber_loss_delta = 0.121194
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = False
        config.temperature = 0.0352513

        model = TapasForQuestionAnswering(config=config)
    elif task == "WIKISQL_SUPERVISED":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = False
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 36.4519
        config.cell_selection_preference = 0.903421
        config.huber_loss_delta = 222.088
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = True
        config.temperature = 0.763141

        model = TapasForQuestionAnswering(config=config)
    elif task == "TABFACT":
        model = TapasForSequenceClassification(config=config)
    elif task == "MLM":
        model = TapasForMaskedLM(config=config)
    elif task == "INTERMEDIATE_PRETRAINING":
        model = TapasModel(config=config)
    else:
        raise ValueError(F'''Task {task} not supported.''')

    print(F'''Building PyTorch model from configuration: {config}''')
    # Load weights from tf checkpoint
    load_tf_weights_in_tapas(model, config, tf_checkpoint_path)

    # Save pytorch-model (weights and configuration)
    print(F'''Save PyTorch model to {pytorch_dump_path}''')
    model.save_pretrained(pytorch_dump_path)

    # Save tokenizer files
    print(F'''Save tokenizer files to {pytorch_dump_path}''')
    tokenizer = TapasTokenizer(vocab_file=tf_checkpoint_path[:-1_0] + '''vocab.txt''', model_max_length=5_1_2)
    tokenizer.save_pretrained(pytorch_dump_path)

    print('''Used relative position embeddings:''', model.config.reset_position_index_per_cell)
if __name__ == "__main__":
__A : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--task''', default='''SQA''', type=str, help='''Model task for which to convert a checkpoint. Defaults to SQA.'''
)
parser.add_argument(
'''--reset_position_index_per_cell''',
default=False,
action='''store_true''',
help='''Whether to use relative position embeddings or not. Defaults to True.''',
)
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--tapas_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained TAPAS model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
__A : List[str] = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.task,
args.reset_position_index_per_cell,
args.tf_checkpoint_path,
args.tapas_config_file,
args.pytorch_dump_path,
)
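
# Example invocation (illustrative paths; script name assumed, and the TF checkpoint
# path must end in "model.ckpt" so the vocab file is resolved next to it):
#
#   python convert_tapas_original_tf_checkpoint_to_pytorch.py --task WTQ \
#       --reset_position_index_per_cell \
#       --tf_checkpoint_path ./tapas_wtq/model.ckpt \
#       --tapas_config_file ./tapas_wtq/config.json \
#       --pytorch_dump_path ./tapas_wtq_pytorch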
| 33 |
'''simple docstring'''
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

rename_keys_prefix = [
('''bert.bert''', '''visual_bert'''),
('''bert.cls''', '''cls'''),
('''bert.classifier''', '''cls'''),
('''token_type_embeddings_visual''', '''visual_token_type_embeddings'''),
('''position_embeddings_visual''', '''visual_position_embeddings'''),
('''projection''', '''visual_projection'''),
]
ACCEPTABLE_CHECKPOINTS = [
'''nlvr2_coco_pre_trained.th''',
'''nlvr2_fine_tuned.th''',
'''nlvr2_pre_trained.th''',
'''vcr_coco_pre_train.th''',
'''vcr_fine_tune.th''',
'''vcr_pre_train.th''',
'''vqa_coco_pre_trained.th''',
'''vqa_fine_tuned.th''',
'''vqa_pre_trained.th''',
]
def load_state_dict(checkpoint_path):
    sd = torch.load(checkpoint_path, map_location='''cpu''')
    return sd


def get_new_dict(d, config, rename_keys_prefix=rename_keys_prefix):
    new_d = OrderedDict()
    new_d['''visual_bert.embeddings.position_ids'''] = torch.arange(config.max_position_embeddings).expand((1, -1))
    # detector_d = OrderedDict()
    for key in d:
        if "detector" in key:
            # detector_d[key.replace('detector.','')] = d[key]
            continue
        new_key = key
        for name_pair in rename_keys_prefix:
            new_key = new_key.replace(name_pair[0], name_pair[1])
        new_d[new_key] = d[key]
        if key == "bert.cls.predictions.decoder.weight":
            # Old bert code didn't have `decoder.bias`, but was added separately
            new_d['''cls.predictions.decoder.bias'''] = new_d['''cls.predictions.bias''']
    return new_d


@torch.no_grad()
def convert_visual_bert_checkpoint(checkpoint_path, pytorch_dump_folder_path):
    assert (
        checkpoint_path.split('''/''')[-1] in ACCEPTABLE_CHECKPOINTS
    ), F"""The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}."""

    # Get Config
    if "pre" in checkpoint_path:
        model_type = '''pretraining'''
        if "vcr" in checkpoint_path:
            config_params = {'''visual_embedding_dim''': 5_1_2}
        elif "vqa_advanced" in checkpoint_path:
            config_params = {'''visual_embedding_dim''': 2_0_4_8}
        elif "vqa" in checkpoint_path:
            config_params = {'''visual_embedding_dim''': 2_0_4_8}
        elif "nlvr" in checkpoint_path:
            config_params = {'''visual_embedding_dim''': 1_0_2_4}
        else:
            raise NotImplementedError(F"""No implementation found for `{checkpoint_path}`.""")
    else:
        if "vcr" in checkpoint_path:
            config_params = {'''visual_embedding_dim''': 5_1_2}
            model_type = '''multichoice'''
        elif "vqa_advanced" in checkpoint_path:
            config_params = {'''visual_embedding_dim''': 2_0_4_8}
            model_type = '''vqa_advanced'''
        elif "vqa" in checkpoint_path:
            config_params = {'''visual_embedding_dim''': 2_0_4_8, '''num_labels''': 3_1_2_9}
            model_type = '''vqa'''
        elif "nlvr" in checkpoint_path:
            config_params = {
                '''visual_embedding_dim''': 1_0_2_4,
                '''num_labels''': 2,
            }
            model_type = '''nlvr'''

    config = VisualBertConfig(**config_params)

    # Load State Dict
    state_dict = load_state_dict(checkpoint_path)

    new_state_dict = get_new_dict(state_dict, config)

    if model_type == "pretraining":
        model = VisualBertForPreTraining(config)
    elif model_type == "vqa":
        model = VisualBertForQuestionAnswering(config)
    elif model_type == "nlvr":
        model = VisualBertForVisualReasoning(config)
    elif model_type == "multichoice":
        model = VisualBertForMultipleChoice(config)

    model.load_state_dict(new_state_dict)
    # Save Checkpoints
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''orig_checkpoint_path''', type=str, help='''A path to .th on local filesystem.''')
parser.add_argument('''pytorch_dump_folder_path''', type=str, help='''Path to the output PyTorch model.''')
UpperCamelCase = parser.parse_args()
convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
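
# Example invocation (illustrative; script name assumed, and the checkpoint name must
# be one of ACCEPTABLE_CHECKPOINTS above):
#
#   python convert_visual_bert_original_pytorch_checkpoint_to_pytorch.py \
#       nlvr2_pre_trained.th ./visualbert_nlvr2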
| 319 | 0 |
'''simple docstring'''
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
fork_point_sha = subprocess.check_output('git merge-base main HEAD'.split()).decode('utf-8')
modified_files = subprocess.check_output(f"""git diff --name-only {fork_point_sha}""".split()).decode('utf-8').split()
joined_dirs = '|'.join(sys.argv[1:])
regex = re.compile(rf"""^({joined_dirs}).*?\.py$""")
relevant_modified_files = [x for x in modified_files if regex.match(x)]
print(' '.join(relevant_modified_files), end='')
| 34 |
'''simple docstring'''
from itertools import permutations
def is_substring_divisible(num: tuple) -> bool:
    if num[3] % 2 != 0:
        return False

    if (num[2] + num[3] + num[4]) % 3 != 0:
        return False

    if num[5] % 5 != 0:
        return False

    tests = [7, 1_1, 1_3, 1_7]
    for i, test in enumerate(tests):
        if (num[i + 4] * 1_0_0 + num[i + 5] * 1_0 + num[i + 6]) % test != 0:
            return False
    return True


def solution(n: int = 1_0) -> int:
    return sum(
        int(''''''.join(map(str, num)))
        for num in permutations(range(n))
        if is_substring_divisible(num)
    )
if __name__ == "__main__":
print(f'{solution() = }')
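
    # Spot check: 1406357289 is one of the pandigitals with the required property
    # (its 4th digit 6 is even, 063 % 3 == 0, 635 % 5 == 0, 357 % 7 == 0,
    # 572 % 11 == 0, 728 % 13 == 0, and 289 % 17 == 0).
    print(is_substring_divisible((1, 4, 0, 6, 3, 5, 7, 2, 8, 9)))  # True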
| 319 | 0 |
'''simple docstring'''
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
    OnnxStableDiffusionImg2ImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionImg2ImgPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    """simple docstring"""

    hub_checkpoint = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"

    def get_dummy_inputs(self, seed=0):
        image = floats_tensor((1, 3, 128, 128), rng=random.Random(seed))
        generator = np.random.RandomState(seed)
        inputs = {
            """prompt""": """A painting of a squirrel eating a burger""",
            """image""": image,
            """generator""": generator,
            """num_inference_steps""": 3,
            """strength""": 0.75,
            """guidance_scale""": 7.5,
            """output_type""": """numpy""",
        }
        return inputs

    def test_pipeline_default_ddim(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="""CPUExecutionProvider""")
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.69643, 0.58484, 0.50314, 0.58760, 0.55368, 0.59643, 0.51529, 0.41217, 0.49087])
        assert np.abs(image_slice - expected_slice).max() < 1E-1

    def test_pipeline_pndm(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="""CPUExecutionProvider""")
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.61737, 0.54642, 0.53183, 0.54465, 0.52742, 0.60525, 0.49969, 0.40655, 0.48154])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-1

    def test_pipeline_lms(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="""CPUExecutionProvider""")
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        # warmup pass to apply optimizations
        _ = pipe(**self.get_dummy_inputs())

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52761, 0.59977, 0.49033, 0.49619, 0.54282, 0.50311, 0.47600, 0.40918, 0.45203])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-1

    def test_pipeline_euler(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="""CPUExecutionProvider""")
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-1

    def test_pipeline_euler_ancestral(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="""CPUExecutionProvider""")
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-1

    def test_pipeline_dpm_multistep(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="""CPUExecutionProvider""")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.65331, 0.58277, 0.48204, 0.56059, 0.53665, 0.56235, 0.50969, 0.40009, 0.46552])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-1


@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionImg2ImgPipelineIntegrationTests(unittest.TestCase):
    """simple docstring"""

    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_inference_default_pndm(self):
        init_image = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/img2img/sketch-mountains-input.jpg""")
        init_image = init_image.resize((768, 512))
        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
            """CompVis/stable-diffusion-v1-4""",
            revision="""onnx""",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = """A fantasy landscape, trending on artstation"""

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=10,
            generator=generator,
            output_type="""np""",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.4909, 0.5059, 0.5372, 0.4623, 0.4876, 0.5049, 0.4820, 0.4956, 0.5019])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2E-2

    def test_inference_k_lms(self):
        init_image = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/img2img/sketch-mountains-input.jpg""")
        init_image = init_image.resize((768, 512))
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            """runwayml/stable-diffusion-v1-5""", subfolder="""scheduler""", revision="""onnx""")
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
            """runwayml/stable-diffusion-v1-5""",
            revision="""onnx""",
            scheduler=lms_scheduler,
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = """A fantasy landscape, trending on artstation"""

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=20,
            generator=generator,
            output_type="""np""",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.8043, 0.926, 0.9581, 0.8119, 0.8954, 0.913, 0.7209, 0.7463, 0.7431])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2E-2
| 35 |
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt'''}

PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''allenai/longformer-base-4096''': '''https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json''',
'''allenai/longformer-large-4096''': (
'''https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json'''
),
'''allenai/longformer-large-4096-finetuned-triviaqa''': (
'''https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json'''
),
'''allenai/longformer-base-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json'''
),
'''allenai/longformer-large-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json'''
),
},
'''merges_file''': {
'''allenai/longformer-base-4096''': '''https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt''',
'''allenai/longformer-large-4096''': (
'''https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt'''
),
'''allenai/longformer-large-4096-finetuned-triviaqa''': (
'''https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt'''
),
'''allenai/longformer-base-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt'''
),
'''allenai/longformer-large-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''allenai/longformer-base-4096''': 4096,
'''allenai/longformer-large-4096''': 4096,
'''allenai/longformer-large-4096-finetuned-triviaqa''': 4096,
'''allenai/longformer-base-4096-extra.pos.embd.only''': 4096,
'''allenai/longformer-large-4096-extra.pos.embd.only''': 4096,
}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode( ) -> Dict:
    bs = (
        list(range(ord('''!''' ) , ord('''~''' ) + 1 ) ) + list(range(ord('''¡''' ) , ord('''¬''' ) + 1 ) ) + list(range(ord('''®''' ) , ord('''ÿ''' ) + 1 ) )
    )
    cs = bs[:]
    n = 0
    for b in range(2**8 ):
        if b not in bs:
            bs.append(b )
            cs.append(2**8 + n )
            n += 1
    cs = [chr(n ) for n in cs]
    return dict(zip(bs , cs ) )
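# Hedged sanity check for the table above (helper name is illustrative, not
# part of the original file): the byte->unicode map must be a bijection over
# all 256 byte values so BPE can round-trip arbitrary bytes.
def _check_byte_encoder(byte_encoder: dict ) -> None:
    byte_decoder = {v: k for k, v in byte_encoder.items()}
    assert len(byte_encoder ) == 256 and len(set(byte_encoder.values() ) ) == 256
    assert all(byte_decoder[byte_encoder[b]] == b for b in range(256 ) )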
def get_pairs( __lowercase ) -> Optional[int]:
    pairs = set()
    prev_char = __lowercase[0]
    for char in __lowercase[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    return pairs
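# Illustrative example (toy input, not from the test data): the adjacent-pair
# set below is exactly the set of merge candidates the BPE loop ranks against
# self.bpe_ranks on every iteration.
# >>> get_pairs(("h", "e", "l", "l", "o"))
# {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}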
class LongformerTokenizer( PreTrainedTokenizer ):
'''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["""input_ids""", """attention_mask"""]
def __init__( self : int , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : str="replace" , SCREAMING_SNAKE_CASE_ : str="<s>" , SCREAMING_SNAKE_CASE_ : Any="</s>" , SCREAMING_SNAKE_CASE_ : int="</s>" , SCREAMING_SNAKE_CASE_ : List[Any]="<s>" , SCREAMING_SNAKE_CASE_ : str="<unk>" , SCREAMING_SNAKE_CASE_ : Dict="<pad>" , SCREAMING_SNAKE_CASE_ : Dict="<mask>" , SCREAMING_SNAKE_CASE_ : Union[str, Any]=False , **SCREAMING_SNAKE_CASE_ : Tuple , ) -> List[str]:
'''simple docstring'''
A: int = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else bos_token
A: Dict = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else eos_token
A: int = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else sep_token
A: Dict = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else cls_token
A: Any = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else unk_token
A: str = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
A: Dict = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else mask_token
super().__init__(
errors=SCREAMING_SNAKE_CASE_ , bos_token=SCREAMING_SNAKE_CASE_ , eos_token=SCREAMING_SNAKE_CASE_ , unk_token=SCREAMING_SNAKE_CASE_ , sep_token=SCREAMING_SNAKE_CASE_ , cls_token=SCREAMING_SNAKE_CASE_ , pad_token=SCREAMING_SNAKE_CASE_ , mask_token=SCREAMING_SNAKE_CASE_ , add_prefix_space=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
with open(SCREAMING_SNAKE_CASE_ , encoding='''utf-8''' ) as vocab_handle:
A: str = json.load(SCREAMING_SNAKE_CASE_ )
A: str = {v: k for k, v in self.encoder.items()}
A: Union[str, Any] = errors # how to handle errors in decoding
A: Optional[int] = bytes_to_unicode()
A: Union[str, Any] = {v: k for k, v in self.byte_encoder.items()}
with open(SCREAMING_SNAKE_CASE_ , encoding='''utf-8''' ) as merges_handle:
A: int = merges_handle.read().split('''\n''' )[1:-1]
A: str = [tuple(merge.split() ) for merge in bpe_merges]
A: Any = dict(zip(SCREAMING_SNAKE_CASE_ , range(len(SCREAMING_SNAKE_CASE_ ) ) ) )
A: Union[str, Any] = {}
A: Tuple = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
A: Dict = re.compile(R'''\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+''' )
@property
def _snake_case ( self : int ) -> List[Any]:
'''simple docstring'''
return len(self.encoder )
def _snake_case ( self : Optional[Any] ) -> int:
'''simple docstring'''
return dict(self.encoder , **self.added_tokens_encoder )
def _snake_case ( self : str , SCREAMING_SNAKE_CASE_ : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
if token in self.cache:
return self.cache[token]
A: str = tuple(SCREAMING_SNAKE_CASE_ )
A: str = get_pairs(SCREAMING_SNAKE_CASE_ )
if not pairs:
return token
while True:
A: Dict = min(SCREAMING_SNAKE_CASE_ , key=lambda SCREAMING_SNAKE_CASE_ : self.bpe_ranks.get(SCREAMING_SNAKE_CASE_ , float('''inf''' ) ) )
if bigram not in self.bpe_ranks:
break
A , A: Optional[Any] = bigram
A: Tuple = []
A: List[Any] = 0
while i < len(SCREAMING_SNAKE_CASE_ ):
try:
A: Union[str, Any] = word.index(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
A: int = j
if word[i] == first and i < len(SCREAMING_SNAKE_CASE_ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
A: Optional[Any] = tuple(SCREAMING_SNAKE_CASE_ )
A: Any = new_word
if len(SCREAMING_SNAKE_CASE_ ) == 1:
break
else:
A: Union[str, Any] = get_pairs(SCREAMING_SNAKE_CASE_ )
A: str = ''' '''.join(SCREAMING_SNAKE_CASE_ )
A: str = word
return word
def _snake_case ( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
A: Dict = []
for token in re.findall(self.pat , SCREAMING_SNAKE_CASE_ ):
A: Tuple = ''''''.join(
self.byte_encoder[b] for b in token.encode('''utf-8''' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(SCREAMING_SNAKE_CASE_ ).split(''' ''' ) )
return bpe_tokens
def _snake_case ( self : List[Any] , SCREAMING_SNAKE_CASE_ : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
return self.encoder.get(SCREAMING_SNAKE_CASE_ , self.encoder.get(self.unk_token ) )
def _snake_case ( self : Tuple , SCREAMING_SNAKE_CASE_ : Optional[Any] ) -> str:
'''simple docstring'''
return self.decoder.get(SCREAMING_SNAKE_CASE_ )
def _snake_case ( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Optional[int] ) -> Tuple:
'''simple docstring'''
A: Optional[int] = ''''''.join(SCREAMING_SNAKE_CASE_ )
A: Tuple = bytearray([self.byte_decoder[c] for c in text] ).decode('''utf-8''' , errors=self.errors )
return text
def _snake_case ( self : int , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(SCREAMING_SNAKE_CASE_ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
A: Union[str, Any] = os.path.join(
SCREAMING_SNAKE_CASE_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
A: int = os.path.join(
SCREAMING_SNAKE_CASE_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
with open(SCREAMING_SNAKE_CASE_ , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=SCREAMING_SNAKE_CASE_ , ensure_ascii=SCREAMING_SNAKE_CASE_ ) + '''\n''' )
A: Any = 0
with open(SCREAMING_SNAKE_CASE_ , '''w''' , encoding='''utf-8''' ) as writer:
writer.write('''#version: 0.2\n''' )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda SCREAMING_SNAKE_CASE_ : kv[1] ):
if index != token_index:
logger.warning(
f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
''' Please check that the tokenizer is not corrupted!''' )
A: Union[str, Any] = token_index
writer.write(''' '''.join(SCREAMING_SNAKE_CASE_ ) + '''\n''' )
index += 1
return vocab_file, merge_file
def _snake_case ( self : List[Any] , SCREAMING_SNAKE_CASE_ : List[int] , SCREAMING_SNAKE_CASE_ : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
A: int = [self.cls_token_id]
A: str = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def _snake_case ( self : Tuple , SCREAMING_SNAKE_CASE_ : List[int] , SCREAMING_SNAKE_CASE_ : Optional[List[int]] = None , SCREAMING_SNAKE_CASE_ : bool = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=SCREAMING_SNAKE_CASE_ , token_ids_a=SCREAMING_SNAKE_CASE_ , already_has_special_tokens=SCREAMING_SNAKE_CASE_ )
if token_ids_a is None:
return [1] + ([0] * len(SCREAMING_SNAKE_CASE_ )) + [1]
return [1] + ([0] * len(SCREAMING_SNAKE_CASE_ )) + [1, 1] + ([0] * len(SCREAMING_SNAKE_CASE_ )) + [1]
def _snake_case ( self : Tuple , SCREAMING_SNAKE_CASE_ : List[int] , SCREAMING_SNAKE_CASE_ : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
A: Dict = [self.sep_token_id]
A: Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _snake_case ( self : int , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Dict=False , **SCREAMING_SNAKE_CASE_ : Optional[int] ) -> int:
'''simple docstring'''
A: Tuple = kwargs.pop('''add_prefix_space''' , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(SCREAMING_SNAKE_CASE_ ) > 0 and not text[0].isspace()):
A: List[Any] = ''' ''' + text
return (text, kwargs)
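# Hedged recap of the special-token layouts implemented above (the RoBERTa-style
# convention Longformer inherits); the helper is illustrative, working on plain
# id lists rather than through the tokenizer class.
def _layout(cls_id: int , sep_id: int , ids_a: list , ids_b: list = None ) -> list:
    if ids_b is None:
        return [cls_id] + ids_a + [sep_id]  # <s> A </s>
    return [cls_id] + ids_a + [sep_id, sep_id] + ids_b + [sep_id]  # <s> A </s></s> B </s>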
| 319 | 0 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetrImageProcessor
class DetrImageProcessingTester( unittest.TestCase):
def __init__( self, __a, __a=7, __a=3, __a=30, __a=400, __a=True, __a=None, __a=True, __a=1 / 255, __a=True, __a=[0.5, 0.5, 0.5], __a=[0.5, 0.5, 0.5], __a=True, ):
'''simple docstring'''
_lowerCAmelCase : int = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
_lowerCAmelCase : Dict = parent
_lowerCAmelCase : Optional[int] = batch_size
_lowerCAmelCase : Tuple = num_channels
_lowerCAmelCase : Any = min_resolution
_lowerCAmelCase : Tuple = max_resolution
_lowerCAmelCase : Optional[Any] = do_resize
_lowerCAmelCase : Any = size
_lowerCAmelCase : Union[str, Any] = do_rescale
_lowerCAmelCase : List[Any] = rescale_factor
_lowerCAmelCase : Tuple = do_normalize
_lowerCAmelCase : Union[str, Any] = image_mean
_lowerCAmelCase : Tuple = image_std
_lowerCAmelCase : Tuple = do_pad
def snake_case__ ( self):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_pad": self.do_pad,
}
def snake_case__ ( self, __a, __a=False):
'''simple docstring'''
if not batched:
_lowerCAmelCase : List[str] = image_inputs[0]
if isinstance(__a, Image.Image):
_lowerCAmelCase , _lowerCAmelCase : List[str] = image.size
else:
_lowerCAmelCase , _lowerCAmelCase : List[Any] = image.shape[1], image.shape[2]
if w < h:
_lowerCAmelCase : Optional[int] = int(self.size["shortest_edge"] * h / w)
_lowerCAmelCase : List[Any] = self.size["shortest_edge"]
elif w > h:
_lowerCAmelCase : str = self.size["shortest_edge"]
_lowerCAmelCase : Union[str, Any] = int(self.size["shortest_edge"] * w / h)
else:
_lowerCAmelCase : Any = self.size["shortest_edge"]
_lowerCAmelCase : List[str] = self.size["shortest_edge"]
else:
_lowerCAmelCase : Optional[int] = []
for image in image_inputs:
_lowerCAmelCase , _lowerCAmelCase : List[str] = self.get_expected_values([image])
expected_values.append((expected_height, expected_width))
_lowerCAmelCase : List[str] = max(__a, key=lambda __a: item[0])[0]
_lowerCAmelCase : Union[str, Any] = max(__a, key=lambda __a: item[1])[1]
return expected_height, expected_width
@require_torch
@require_vision
class DetrImageProcessingTest( ImageProcessingSavingTestMixin , unittest.TestCase):
lowerCamelCase__ = DetrImageProcessor if is_vision_available() else None
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : List[Any] = DetrImageProcessingTester(self)
@property
def snake_case__ ( self):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : str = self.image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(__a, "image_mean"))
self.assertTrue(hasattr(__a, "image_std"))
self.assertTrue(hasattr(__a, "do_normalize"))
self.assertTrue(hasattr(__a, "do_rescale"))
self.assertTrue(hasattr(__a, "rescale_factor"))
self.assertTrue(hasattr(__a, "do_resize"))
self.assertTrue(hasattr(__a, "size"))
self.assertTrue(hasattr(__a, "do_pad"))
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Dict = self.image_processing_class.from_dict(self.image_processor_dict)
self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
self.assertEqual(image_processor.do_pad, __a)
_lowerCAmelCase : int = self.image_processing_class.from_dict(
self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=__a)
self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
self.assertEqual(image_processor.do_pad, __a)
def snake_case__ ( self):
'''simple docstring'''
pass
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Any = self.image_processing_class(**self.image_processor_dict)
# create random PIL images
_lowerCAmelCase : List[str] = prepare_image_inputs(self.image_processor_tester, equal_resolution=__a)
for image in image_inputs:
self.assertIsInstance(__a, Image.Image)
# Test not batched input
_lowerCAmelCase : Tuple = image_processing(image_inputs[0], return_tensors="pt").pixel_values
_lowerCAmelCase , _lowerCAmelCase : Tuple = self.image_processor_tester.get_expected_values(__a)
self.assertEqual(
encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )
# Test batched
_lowerCAmelCase , _lowerCAmelCase : Dict = self.image_processor_tester.get_expected_values(__a, batched=__a)
_lowerCAmelCase : Optional[Any] = image_processing(__a, return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
), )
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = self.image_processing_class(**self.image_processor_dict)
# create random numpy tensors
_lowerCAmelCase : int = prepare_image_inputs(self.image_processor_tester, equal_resolution=__a, numpify=__a)
for image in image_inputs:
self.assertIsInstance(__a, np.ndarray)
# Test not batched input
_lowerCAmelCase : List[str] = image_processing(image_inputs[0], return_tensors="pt").pixel_values
_lowerCAmelCase , _lowerCAmelCase : Optional[int] = self.image_processor_tester.get_expected_values(__a)
self.assertEqual(
encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )
# Test batched
_lowerCAmelCase : List[Any] = image_processing(__a, return_tensors="pt").pixel_values
_lowerCAmelCase , _lowerCAmelCase : int = self.image_processor_tester.get_expected_values(__a, batched=__a)
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
), )
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = self.image_processing_class(**self.image_processor_dict)
# create random PyTorch tensors
_lowerCAmelCase : Any = prepare_image_inputs(self.image_processor_tester, equal_resolution=__a, torchify=__a)
for image in image_inputs:
self.assertIsInstance(__a, torch.Tensor)
# Test not batched input
_lowerCAmelCase : Optional[Any] = image_processing(image_inputs[0], return_tensors="pt").pixel_values
_lowerCAmelCase , _lowerCAmelCase : Any = self.image_processor_tester.get_expected_values(__a)
self.assertEqual(
encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )
# Test batched
_lowerCAmelCase : Any = image_processing(__a, return_tensors="pt").pixel_values
_lowerCAmelCase , _lowerCAmelCase : Union[str, Any] = self.image_processor_tester.get_expected_values(__a, batched=__a)
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
), )
@slow
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
_lowerCAmelCase : int = json.loads(f.read())
_lowerCAmelCase : Optional[int] = {"image_id": 3_9769, "annotations": target}
# encode them
_lowerCAmelCase : str = DetrImageProcessor.from_pretrained("facebook/detr-resnet-50")
_lowerCAmelCase : str = image_processing(images=__a, annotations=__a, return_tensors="pt")
# verify pixel values
_lowerCAmelCase : Optional[Any] = torch.Size([1, 3, 800, 1066])
self.assertEqual(encoding["pixel_values"].shape, __a)
_lowerCAmelCase : Union[str, Any] = torch.tensor([0.2_796, 0.3_138, 0.3_481])
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], __a, atol=1E-4))
# verify area
_lowerCAmelCase : int = torch.tensor([5_887.9_600, 11_250.2_061, 489_353.8_438, 837_122.7_500, 147_967.5_156, 165_732.3_438])
self.assertTrue(torch.allclose(encoding["labels"][0]["area"], __a))
# verify boxes
_lowerCAmelCase : Union[str, Any] = torch.Size([6, 4])
self.assertEqual(encoding["labels"][0]["boxes"].shape, __a)
_lowerCAmelCase : int = torch.tensor([0.5_503, 0.2_765, 0.0_604, 0.2_215])
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], __a, atol=1E-3))
# verify image_id
_lowerCAmelCase : List[str] = torch.tensor([3_9769])
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], __a))
# verify is_crowd
_lowerCAmelCase : int = torch.tensor([0, 0, 0, 0, 0, 0])
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], __a))
# verify class_labels
_lowerCAmelCase : List[str] = torch.tensor([75, 75, 63, 65, 17, 17])
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], __a))
# verify orig_size
_lowerCAmelCase : Optional[int] = torch.tensor([480, 640])
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], __a))
# verify size
_lowerCAmelCase : Any = torch.tensor([800, 1066])
self.assertTrue(torch.allclose(encoding["labels"][0]["size"], __a))
@slow
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
_lowerCAmelCase : Union[str, Any] = json.loads(f.read())
_lowerCAmelCase : Tuple = {"file_name": "000000039769.png", "image_id": 3_9769, "segments_info": target}
_lowerCAmelCase : Optional[Any] = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")
# encode them
_lowerCAmelCase : Optional[Any] = DetrImageProcessor.from_pretrained("facebook/detr-resnet-50-panoptic")
_lowerCAmelCase : str = image_processing(images=__a, annotations=__a, masks_path=__a, return_tensors="pt")
# verify pixel values
_lowerCAmelCase : Optional[Any] = torch.Size([1, 3, 800, 1066])
self.assertEqual(encoding["pixel_values"].shape, __a)
_lowerCAmelCase : str = torch.tensor([0.2_796, 0.3_138, 0.3_481])
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], __a, atol=1E-4))
# verify area
_lowerCAmelCase : List[Any] = torch.tensor([147_979.6_875, 165_527.0_469, 484_638.5_938, 11_292.9_375, 5_879.6_562, 7_634.1_147])
self.assertTrue(torch.allclose(encoding["labels"][0]["area"], __a))
# verify boxes
_lowerCAmelCase : Union[str, Any] = torch.Size([6, 4])
self.assertEqual(encoding["labels"][0]["boxes"].shape, __a)
_lowerCAmelCase : List[str] = torch.tensor([0.2_625, 0.5_437, 0.4_688, 0.8_625])
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], __a, atol=1E-3))
# verify image_id
_lowerCAmelCase : List[str] = torch.tensor([3_9769])
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], __a))
# verify is_crowd
_lowerCAmelCase : List[Any] = torch.tensor([0, 0, 0, 0, 0, 0])
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], __a))
# verify class_labels
_lowerCAmelCase : int = torch.tensor([17, 17, 63, 75, 75, 93])
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], __a))
# verify masks
_lowerCAmelCase : Dict = 82_2873
self.assertEqual(encoding["labels"][0]["masks"].sum().item(), __a)
# verify orig_size
_lowerCAmelCase : Tuple = torch.tensor([480, 640])
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], __a))
# verify size
_lowerCAmelCase : Tuple = torch.tensor([800, 1066])
self.assertTrue(torch.allclose(encoding["labels"][0]["size"], __a))
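# Hedged sketch of the aspect-preserving resize that get_expected_values above
# computes: scale so the short side lands on `shortest_edge` (the longest_edge
# cap DETR also applies is omitted for brevity; names are illustrative).
def _resized_hw(h: int , w: int , shortest_edge: int = 18 ) -> tuple:
    if w < h:
        return int(shortest_edge * h / w ), shortest_edge
    if w > h:
        return shortest_edge, int(shortest_edge * w / h )
    return shortest_edge, shortest_edge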
| 36 |
'''simple docstring'''
def SCREAMING_SNAKE_CASE( __lowercase ) -> int:
    if not isinstance(__lowercase , int ):
        raise TypeError('''only integers accepted as input''' )
    num_string = str(abs(__lowercase ) )
    num_transpositions = [list(num_string ) for _ in range(len(num_string ) )]
    for index in range(len(num_string ) ):
        num_transpositions[index].pop(index )
    return max(
        int(''''''.join(transposition ) ) for transposition in num_transpositions )
if __name__ == "__main__":
__import__('''doctest''').testmod()
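# Worked example for the fixed function above: the one-digit deletions of 1253
# are 253, 153, 123 and 125, so the maximum is 253.
# >>> SCREAMING_SNAKE_CASE(1253)
# 253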
| 319 | 0 |
'''simple docstring'''
def lucas_lehmer_test ( p ):
    """simple docstring"""
    if p < 2:
        raise ValueError("""p should not be less than 2!""" )
    elif p == 2:
        return True
    s = 4
    m = (1 << p) - 1
    for _ in range(p - 2 ):
        s = ((s * s) - 2) % m
    return s == 0
if __name__ == "__main__":
print(lucas_lehmer_test(7))
print(lucas_lehmer_test(11))
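# Worked recurrence for p = 5 (M_5 = 31): s0 = 4, s1 = (16 - 2) % 31 = 14,
# s2 = (196 - 2) % 31 = 8, s3 = (64 - 2) % 31 = 0 -- s hits 0, so M_5 is prime.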
| 37 |
'''simple docstring'''
from __future__ import annotations
import math
def default_matrix_multiplication(a , b ) -> list:
    if len(a ) != 2 or len(a[0] ) != 2 or len(b ) != 2 or len(b[0] ) != 2:
        raise Exception('''Matrices are not 2x2''' )
    new_matrix = [
        [a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]],
        [a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]],
    ]
    return new_matrix
def matrix_addition(matrix_a , matrix_b ) -> list:
    return [
        [matrix_a[row][col] + matrix_b[row][col] for col in range(len(matrix_a[row] ) )]
        for row in range(len(matrix_a ) )
    ]
def matrix_subtraction(matrix_a , matrix_b ) -> list:
    return [
        [matrix_a[row][col] - matrix_b[row][col] for col in range(len(matrix_a[row] ) )]
        for row in range(len(matrix_a ) )
    ]
def split_matrix(a ) -> tuple[list, list, list, list]:
    if len(a ) % 2 != 0 or len(a[0] ) % 2 != 0:
        raise Exception('''Odd matrices are not supported!''' )
    matrix_length = len(a )
    mid = matrix_length // 2
    top_right = [[a[i][j] for j in range(mid , matrix_length )] for i in range(mid )]
    bot_right = [
        [a[i][j] for j in range(mid , matrix_length )] for i in range(mid , matrix_length )
    ]
    top_left = [[a[i][j] for j in range(mid )] for i in range(mid )]
    bot_left = [[a[i][j] for j in range(mid )] for i in range(mid , matrix_length )]
    return top_left, top_right, bot_left, bot_right
def matrix_dimensions(matrix ) -> tuple[int, int]:
    return len(matrix ), len(matrix[0] )
def print_matrix(matrix ) -> None:
    print('''\n'''.join(str(line ) for line in matrix ) )
def actual_strassen(matrix_a , matrix_b ) -> list:
    if matrix_dimensions(matrix_a ) == (2, 2):
        return default_matrix_multiplication(matrix_a , matrix_b )
    a , b , c , d = split_matrix(matrix_a )
    e , f , g , h = split_matrix(matrix_b )
    t1 = actual_strassen(a , matrix_subtraction(f , h ) )
    t2 = actual_strassen(matrix_addition(a , b ) , h )
    t3 = actual_strassen(matrix_addition(c , d ) , e )
    t4 = actual_strassen(d , matrix_subtraction(g , e ) )
    t5 = actual_strassen(matrix_addition(a , d ) , matrix_addition(e , h ) )
    t6 = actual_strassen(matrix_subtraction(b , d ) , matrix_addition(g , h ) )
    t7 = actual_strassen(matrix_subtraction(a , c ) , matrix_addition(e , f ) )
    top_left = matrix_addition(matrix_subtraction(matrix_addition(t5 , t4 ) , t2 ) , t6 )
    top_right = matrix_addition(t1 , t2 )
    bot_left = matrix_addition(t3 , t4 )
    bot_right = matrix_subtraction(matrix_subtraction(matrix_addition(t1 , t5 ) , t3 ) , t7 )
    # construct the new matrix from our 4 quadrants
    new_matrix = []
    for i in range(len(top_right ) ):
        new_matrix.append(top_left[i] + top_right[i] )
    for i in range(len(bot_right ) ):
        new_matrix.append(bot_left[i] + bot_right[i] )
    return new_matrix
def strassen(matrixa , matrixb ) -> list:
    if matrix_dimensions(matrixa )[1] != matrix_dimensions(matrixb )[0]:
        msg = (
            '''Unable to multiply these matrices, please check the dimensions.\n'''
            F"""Matrix A: {matrixa}\n"""
            F"""Matrix B: {matrixb}"""
        )
        raise Exception(msg )
    dimensiona = matrix_dimensions(matrixa )
    dimensionb = matrix_dimensions(matrixb )
    if dimensiona[0] == dimensiona[1] and dimensionb[0] == dimensionb[1]:
        return [matrixa, matrixb]
    maximum = max(*dimensiona , *dimensionb )
    maxim = int(math.pow(2 , math.ceil(math.log2(maximum ) ) ) )
    new_matrixa = matrixa
    new_matrixb = matrixb
    # Adding zeros to the matrices so that the arrays dimensions are the same and also
    # power of 2
    for i in range(0 , maxim ):
        if i < dimensiona[0]:
            for _ in range(dimensiona[1] , maxim ):
                new_matrixa[i].append(0 )
        else:
            new_matrixa.append([0] * maxim )
        if i < dimensionb[0]:
            for _ in range(dimensionb[1] , maxim ):
                new_matrixb[i].append(0 )
        else:
            new_matrixb.append([0] * maxim )
    final_matrix = actual_strassen(new_matrixa , new_matrixb )
    # Removing the additional zeros
    for i in range(0 , maxim ):
        if i < dimensiona[0]:
            for _ in range(dimensionb[1] , maxim ):
                final_matrix[i].pop()
        else:
            final_matrix.pop()
    return final_matrix
if __name__ == "__main__":
    matrixa = [
[2, 3, 4, 5],
[6, 4, 3, 1],
[2, 3, 6, 7],
[3, 1, 2, 4],
[2, 3, 4, 5],
[6, 4, 3, 1],
[2, 3, 6, 7],
[3, 1, 2, 4],
[2, 3, 4, 5],
[6, 2, 3, 1],
]
    matrixb = [[0, 2, 1, 1], [16, 2, 3, 3], [2, 2, 7, 7], [13, 11, 22, 4]]
    print(strassen(matrixa, matrixb))
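# Hedged sanity check (helper name is illustrative): the result above should
# match a naive triple-loop product of the same inputs.
def _naive_matmul(a , b ) -> list:
    return [
        [sum(a[i][k] * b[k][j] for k in range(len(b ) ) ) for j in range(len(b[0] ) )]
        for i in range(len(a ) )
    ]
# assert strassen(matrixa, matrixb) == _naive_matmul(matrixa, matrixb)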
| 319 | 0 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class SpeechToTextTool( PipelineTool ):
    default_checkpoint = """openai/whisper-base"""
    description = (
        """This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the """
        """transcribed text."""
    )
    name = """transcriber"""
    pre_processor_class = WhisperProcessor
    model_class = WhisperForConditionalGeneration
    inputs = ["""audio"""]
    outputs = ["""text"""]
    def encode( self : str , __lowerCamelCase : Dict ):
        return self.pre_processor(__lowerCamelCase , return_tensors="""pt""" ).input_features
    def forward( self : Dict , __lowerCamelCase : List[Any] ):
        return self.model.generate(inputs=__lowerCamelCase )
    def decode( self : Any , __lowerCamelCase : Optional[Any] ):
        return self.pre_processor.batch_decode(__lowerCamelCase , skip_special_tokens=True )[0]
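# Hedged usage sketch (values are placeholders): PipelineTool wires
# encode -> forward -> decode, so calling the tool with raw audio returns text.
# tool = SpeechToTextTool()
# transcript = tool(waveform)   # `waveform` is a raw audio array or file path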
| 38 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
class KarrasVeOutput( BaseOutput ):
    '''simple docstring'''
    prev_sample: torch.FloatTensor
    derivative: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None
class KarrasVeScheduler( SchedulerMixin , ConfigMixin ):
    '''simple docstring'''
    order = 2
@register_to_config
def __init__( self : List[str] , SCREAMING_SNAKE_CASE_ : float = 0.02 , SCREAMING_SNAKE_CASE_ : float = 1_00 , SCREAMING_SNAKE_CASE_ : float = 1.007 , SCREAMING_SNAKE_CASE_ : float = 80 , SCREAMING_SNAKE_CASE_ : float = 0.05 , SCREAMING_SNAKE_CASE_ : float = 50 , ) -> Optional[int]:
'''simple docstring'''
A: Union[str, Any] = sigma_max
# setable values
A: int = None
A: np.IntTensor = None
A: torch.FloatTensor = None # sigma(t_i)
def _snake_case ( self : str , SCREAMING_SNAKE_CASE_ : torch.FloatTensor , SCREAMING_SNAKE_CASE_ : Optional[int] = None ) -> torch.FloatTensor:
'''simple docstring'''
return sample
def _snake_case ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Union[str, torch.device] = None ) -> Optional[Any]:
'''simple docstring'''
A: List[Any] = num_inference_steps
A: List[str] = np.arange(0 , self.num_inference_steps )[::-1].copy()
A: Any = torch.from_numpy(SCREAMING_SNAKE_CASE_ ).to(SCREAMING_SNAKE_CASE_ )
A: str = [
(
self.config.sigma_max**2
* (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
)
for i in self.timesteps
]
A: Tuple = torch.tensor(SCREAMING_SNAKE_CASE_ , dtype=torch.floataa , device=SCREAMING_SNAKE_CASE_ )
def _snake_case ( self : List[Any] , SCREAMING_SNAKE_CASE_ : torch.FloatTensor , SCREAMING_SNAKE_CASE_ : float , SCREAMING_SNAKE_CASE_ : Optional[torch.Generator] = None ) -> Tuple[torch.FloatTensor, float]:
'''simple docstring'''
if self.config.s_min <= sigma <= self.config.s_max:
A: str = min(self.config.s_churn / self.num_inference_steps , 2**0.5 - 1 )
else:
A: List[str] = 0
# sample eps ~ N(0, S_noise^2 * I)
A: Optional[Any] = self.config.s_noise * randn_tensor(sample.shape , generator=SCREAMING_SNAKE_CASE_ ).to(sample.device )
A: Optional[Any] = sigma + gamma * sigma
A: List[Any] = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
return sample_hat, sigma_hat
def _snake_case ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : torch.FloatTensor , SCREAMING_SNAKE_CASE_ : float , SCREAMING_SNAKE_CASE_ : float , SCREAMING_SNAKE_CASE_ : torch.FloatTensor , SCREAMING_SNAKE_CASE_ : bool = True , ) -> Union[KarrasVeOutput, Tuple]:
'''simple docstring'''
A: Union[str, Any] = sample_hat + sigma_hat * model_output
A: str = (sample_hat - pred_original_sample) / sigma_hat
A: Optional[int] = sample_hat + (sigma_prev - sigma_hat) * derivative
if not return_dict:
return (sample_prev, derivative)
return KarrasVeOutput(
prev_sample=SCREAMING_SNAKE_CASE_ , derivative=SCREAMING_SNAKE_CASE_ , pred_original_sample=SCREAMING_SNAKE_CASE_ )
def _snake_case ( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : torch.FloatTensor , SCREAMING_SNAKE_CASE_ : float , SCREAMING_SNAKE_CASE_ : float , SCREAMING_SNAKE_CASE_ : torch.FloatTensor , SCREAMING_SNAKE_CASE_ : torch.FloatTensor , SCREAMING_SNAKE_CASE_ : torch.FloatTensor , SCREAMING_SNAKE_CASE_ : bool = True , ) -> Union[KarrasVeOutput, Tuple]:
'''simple docstring'''
A: int = sample_prev + sigma_prev * model_output
A: List[Any] = (sample_prev - pred_original_sample) / sigma_prev
A: Dict = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)
if not return_dict:
return (sample_prev, derivative)
return KarrasVeOutput(
prev_sample=SCREAMING_SNAKE_CASE_ , derivative=SCREAMING_SNAKE_CASE_ , pred_original_sample=SCREAMING_SNAKE_CASE_ )
def _snake_case ( self : List[str] , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : str ) -> Dict:
'''simple docstring'''
raise NotImplementedError()
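# Hedged recap of the predictor-corrector pair above in plain math (Karras et
# al. 2022, Algorithm 2); `denoised(x)` stands for x + sigma * model_output:
#   d      = (x_hat - denoised(x_hat)) / sigma_hat             # step()
#   x_prev = x_hat + (sigma_prev - sigma_hat) * d
#   d_corr = (x_prev - denoised(x_prev)) / sigma_prev          # step_correct()
#   x_prev = x_hat + (sigma_prev - sigma_hat) * (d + d_corr) / 2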
| 319 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_a = {'''configuration_mbart''': ['''MBART_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MBartConfig''', '''MBartOnnxConfig''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a = ['''MBartTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a = ['''MBartTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a = [
'''MBART_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MBartForCausalLM''',
'''MBartForConditionalGeneration''',
'''MBartForQuestionAnswering''',
'''MBartForSequenceClassification''',
'''MBartModel''',
'''MBartPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a = [
'''TFMBartForConditionalGeneration''',
'''TFMBartModel''',
'''TFMBartPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a = [
'''FlaxMBartForConditionalGeneration''',
'''FlaxMBartForQuestionAnswering''',
'''FlaxMBartForSequenceClassification''',
'''FlaxMBartModel''',
'''FlaxMBartPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mbart import MBART_PRETRAINED_CONFIG_ARCHIVE_MAP, MBartConfig, MBartOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart import MBartTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart_fast import MBartTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mbart import (
MBART_PRETRAINED_MODEL_ARCHIVE_LIST,
MBartForCausalLM,
MBartForConditionalGeneration,
MBartForQuestionAnswering,
MBartForSequenceClassification,
MBartModel,
MBartPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mbart import TFMBartForConditionalGeneration, TFMBartModel, TFMBartPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mbart import (
FlaxMBartForConditionalGeneration,
FlaxMBartForQuestionAnswering,
FlaxMBartForSequenceClassification,
FlaxMBartModel,
FlaxMBartPreTrainedModel,
)
else:
import sys
_a = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
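# Hedged minimal sketch of the lazy-import idea above (the real _LazyModule
# also handles __dir__, submodule listing and error reporting; names are
# illustrative):
import importlib

class _TinyLazyModule:
    def __init__(self, package: str, attr_to_submodule: dict):
        self._package, self._map = package, attr_to_submodule
    def __getattr__(self, attr: str):
        # resolve the owning submodule on first access, then cache nothing:
        # every lookup goes through the map, as in the simplified sketch
        submodule = importlib.import_module(self._map[attr], self._package)
        return getattr(submodule, attr)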
| 39 |
'''simple docstring'''
import json
import logging
import math
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from datasets import Dataset, load_dataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_FOR_MASKED_LM_MAPPING,
AutoConfig,
AutoModelForMaskedLM,
AutoTokenizer,
DataCollatorForWholeWordMask,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
logger = logging.getLogger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_FOR_MASKED_LM_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class ModelArguments :
'''simple docstring'''
UpperCamelCase_ : Optional[str] = field(
default=UpperCAmelCase_ , metadata={
"""help""": (
"""The model checkpoint for weights initialization.Don't set if you want to train a model from scratch."""
)
} , )
UpperCamelCase_ : Optional[str] = field(
default=UpperCAmelCase_ , metadata={"""help""": """If training from scratch, pass a model type from the list: """ + """, """.join(UpperCAmelCase_ )} , )
UpperCamelCase_ : Optional[str] = field(
default=UpperCAmelCase_ , metadata={
"""help""": (
"""Override some existing default config settings when a model is trained from scratch. Example: """
"""n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"""
)
} , )
UpperCamelCase_ : Optional[str] = field(
default=UpperCAmelCase_ , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
UpperCamelCase_ : Optional[str] = field(
default=UpperCAmelCase_ , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
UpperCamelCase_ : Optional[str] = field(
default=UpperCAmelCase_ , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
UpperCamelCase_ : bool = field(
default=UpperCAmelCase_ , metadata={"""help""": """Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."""} , )
UpperCamelCase_ : str = field(
default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , )
UpperCamelCase_ : bool = field(
default=UpperCAmelCase_ , metadata={
"""help""": (
"""Will use the token generated when running `huggingface-cli login` (necessary to use this script """
"""with private models)."""
)
} , )
def _snake_case ( self : Tuple ) -> List[Any]:
'''simple docstring'''
if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None):
raise ValueError(
'''--config_overrides can\'t be used in combination with --config_name or --model_name_or_path''' )
@dataclass
class DataTrainingArguments :
'''simple docstring'''
UpperCamelCase_ : Optional[str] = field(
default=UpperCAmelCase_ , metadata={"""help""": """The name of the dataset to use (via the datasets library)."""} )
UpperCamelCase_ : Optional[str] = field(
default=UpperCAmelCase_ , metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""} )
UpperCamelCase_ : Optional[str] = field(default=UpperCAmelCase_ , metadata={"""help""": """The input training data file (a text file)."""} )
UpperCamelCase_ : Optional[str] = field(
default=UpperCAmelCase_ , metadata={"""help""": """An optional input evaluation data file to evaluate the perplexity on (a text file)."""} , )
UpperCamelCase_ : Optional[str] = field(
default=UpperCAmelCase_ , metadata={"""help""": """An optional input train ref data file for whole word masking in Chinese."""} , )
UpperCamelCase_ : Optional[str] = field(
default=UpperCAmelCase_ , metadata={"""help""": """An optional input validation ref data file for whole word masking in Chinese."""} , )
UpperCamelCase_ : bool = field(
default=UpperCAmelCase_ , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
UpperCamelCase_ : Optional[int] = field(
default=5 , metadata={
"""help""": """The percentage of the train set used as validation set in case there's no validation split"""
} , )
UpperCamelCase_ : Optional[int] = field(
default=UpperCAmelCase_ , metadata={
"""help""": (
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated. Default to the max input length of the model."""
)
} , )
UpperCamelCase_ : Optional[int] = field(
default=UpperCAmelCase_ , metadata={"""help""": """The number of processes to use for the preprocessing."""} , )
UpperCamelCase_ : float = field(
default=0.15 , metadata={"""help""": """Ratio of tokens to mask for masked language modeling loss"""} )
UpperCamelCase_ : bool = field(
default=UpperCAmelCase_ , metadata={
"""help""": (
"""Whether to pad all samples to `max_seq_length`. """
"""If False, will pad the samples dynamically when batching to the maximum length in the batch."""
)
} , )
def _snake_case ( self : List[Any] ) -> Optional[int]:
'''simple docstring'''
if self.train_file is not None:
A: Tuple = self.train_file.split('''.''' )[-1]
assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file."
if self.validation_file is not None:
A: str = self.validation_file.split('''.''' )[-1]
assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file."
def add_chinese_references(dataset , ref_file ) -> Dataset:
    with open(ref_file , '''r''' , encoding='''utf-8''' ) as f:
        refs = [json.loads(line ) for line in f.read().splitlines() if (len(line ) > 0 and not line.isspace())]
    assert len(dataset ) == len(refs )
    dataset_dict = {c: dataset[c] for c in dataset.column_names}
    dataset_dict['''chinese_ref'''] = refs
    return Dataset.from_dict(dataset_dict )
def SCREAMING_SNAKE_CASE( ) -> int:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
A: int = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
A , A , A: Optional[int] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
A , A , A: List[Any] = parser.parse_args_into_dataclasses()
# Detecting last checkpoint.
A: Any = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
A: Any = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None:
logger.info(
F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN )
# Log on each process the small summary:
logger.warning(
F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('''Training/evaluation parameters %s''' , __lowercase )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
A: Dict = load_dataset(data_args.dataset_name , data_args.dataset_config_name )
if "validation" not in datasets.keys():
A: int = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=F"""train[:{data_args.validation_split_percentage}%]""" , )
A: Dict = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=F"""train[{data_args.validation_split_percentage}%:]""" , )
else:
A: Any = {}
if data_args.train_file is not None:
A: int = data_args.train_file
if data_args.validation_file is not None:
A: Optional[int] = data_args.validation_file
A: List[str] = data_args.train_file.split('''.''' )[-1]
if extension == "txt":
A: int = '''text'''
A: Any = load_dataset(__lowercase , data_files=__lowercase )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
A: Dict = {
'''cache_dir''': model_args.cache_dir,
'''revision''': model_args.model_revision,
'''use_auth_token''': True if model_args.use_auth_token else None,
}
if model_args.config_name:
A: List[Any] = AutoConfig.from_pretrained(model_args.config_name , **__lowercase )
elif model_args.model_name_or_path:
A: int = AutoConfig.from_pretrained(model_args.model_name_or_path , **__lowercase )
else:
A: str = CONFIG_MAPPING[model_args.model_type]()
logger.warning('''You are instantiating a new config instance from scratch.''' )
if model_args.config_overrides is not None:
logger.info(F"""Overriding config: {model_args.config_overrides}""" )
config.update_from_string(model_args.config_overrides )
logger.info(F"""New config: {config}""" )
A: Tuple = {
'''cache_dir''': model_args.cache_dir,
'''use_fast''': model_args.use_fast_tokenizer,
'''revision''': model_args.model_revision,
'''use_auth_token''': True if model_args.use_auth_token else None,
}
if model_args.tokenizer_name:
A: Optional[int] = AutoTokenizer.from_pretrained(model_args.tokenizer_name , **__lowercase )
elif model_args.model_name_or_path:
A: Union[str, Any] = AutoTokenizer.from_pretrained(model_args.model_name_or_path , **__lowercase )
else:
raise ValueError(
'''You are instantiating a new tokenizer from scratch. This is not supported by this script.'''
'''You can do it from another script, save it, and load it from here, using --tokenizer_name.''' )
if model_args.model_name_or_path:
A: List[Any] = AutoModelForMaskedLM.from_pretrained(
model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=__lowercase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
else:
logger.info('''Training new model from scratch''' )
A: List[Any] = AutoModelForMaskedLM.from_config(__lowercase )
model.resize_token_embeddings(len(__lowercase ) )
# Preprocessing the datasets.
# First we tokenize all the texts.
if training_args.do_train:
A: int = datasets['''train'''].column_names
else:
A: str = datasets['''validation'''].column_names
A: Tuple = '''text''' if '''text''' in column_names else column_names[0]
A: List[str] = '''max_length''' if data_args.pad_to_max_length else False
def tokenize_function(__lowercase ):
# Remove empty lines
A: int = [line for line in examples['''text'''] if len(__lowercase ) > 0 and not line.isspace()]
return tokenizer(examples['''text'''] , padding=__lowercase , truncation=__lowercase , max_length=data_args.max_seq_length )
A: str = datasets.map(
__lowercase , batched=__lowercase , num_proc=data_args.preprocessing_num_workers , remove_columns=[text_column_name] , load_from_cache_file=not data_args.overwrite_cache , )
# Add the chinese references if provided
if data_args.train_ref_file is not None:
A: List[str] = add_chinese_references(tokenized_datasets['''train'''] , data_args.train_ref_file )
if data_args.validation_ref_file is not None:
A: Dict = add_chinese_references(
tokenized_datasets['''validation'''] , data_args.validation_ref_file )
# If we have ref files, need to avoid it removed by trainer
A: Optional[Any] = data_args.train_ref_file or data_args.validation_ref_file
if has_ref:
A: List[Any] = False
# Data collator
# This one will take care of randomly masking the tokens.
A: Optional[Any] = DataCollatorForWholeWordMask(tokenizer=__lowercase , mlm_probability=data_args.mlm_probability )
# Initialize our Trainer
A: Optional[int] = Trainer(
model=__lowercase , args=__lowercase , train_dataset=tokenized_datasets['''train'''] if training_args.do_train else None , eval_dataset=tokenized_datasets['''validation'''] if training_args.do_eval else None , tokenizer=__lowercase , data_collator=__lowercase , )
# Training
if training_args.do_train:
if last_checkpoint is not None:
A: Optional[int] = last_checkpoint
elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path ):
A: str = model_args.model_name_or_path
else:
A: List[str] = None
A: str = trainer.train(resume_from_checkpoint=__lowercase )
trainer.save_model() # Saves the tokenizer too for easy upload
A: Union[str, Any] = os.path.join(training_args.output_dir , '''train_results.txt''' )
if trainer.is_world_process_zero():
with open(__lowercase , '''w''' ) as writer:
logger.info('''***** Train results *****''' )
for key, value in sorted(train_result.metrics.items() ):
logger.info(F""" {key} = {value}""" )
writer.write(F"""{key} = {value}\n""" )
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir , '''trainer_state.json''' ) )
# Evaluation
A: Optional[int] = {}
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
A: Optional[Any] = trainer.evaluate()
A: Union[str, Any] = math.exp(eval_output['''eval_loss'''] )
A: Dict = perplexity
A: Any = os.path.join(training_args.output_dir , '''eval_results_mlm_wwm.txt''' )
if trainer.is_world_process_zero():
with open(__lowercase , '''w''' ) as writer:
logger.info('''***** Eval results *****''' )
for key, value in sorted(results.items() ):
logger.info(F""" {key} = {value}""" )
writer.write(F"""{key} = {value}\n""" )
return results
def _mp_fn(index ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
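# Hedged example invocation of this script (paths and model name are
# placeholders; the flags match the dataclasses defined above):
# python run_mlm_wwm.py \
#   --model_name_or_path bert-base-chinese \
#   --train_file train.txt --train_ref_file train_ref.txt \
#   --do_train --output_dir ./out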
| 319 | 0 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinvaConfig, SwinvaForImageClassification
def get_swinva_config( A_ )-> Any:
'''simple docstring'''
a : int = SwinvaConfig()
a : Any = swinva_name.split("_" )
a : Optional[int] = name_split[1]
if "to" in name_split[3]:
a : Optional[int] = int(name_split[3][-3:] )
else:
a : List[str] = int(name_split[3] )
if "to" in name_split[2]:
a : Optional[int] = int(name_split[2][-2:] )
else:
a : int = int(name_split[2][6:] )
if model_size == "tiny":
a : Optional[int] = 96
a : Any = (2, 2, 6, 2)
a : Dict = (3, 6, 12, 24)
elif model_size == "small":
a : List[str] = 96
a : Optional[int] = (2, 2, 18, 2)
a : Dict = (3, 6, 12, 24)
elif model_size == "base":
a : Optional[Any] = 128
a : Dict = (2, 2, 18, 2)
a : Any = (4, 8, 16, 32)
else:
a : List[Any] = 192
a : Any = (2, 2, 18, 2)
a : Any = (6, 12, 24, 48)
if "to" in swinva_name:
a : int = (12, 12, 12, 6)
if ("22k" in swinva_name) and ("to" not in swinva_name):
a : Tuple = 21_841
a : Union[str, Any] = "huggingface/label-files"
a : List[Any] = "imagenet-22k-id2label.json"
a : List[str] = json.load(open(hf_hub_download(A_ , A_ , repo_type="dataset" ) , "r" ) )
a : List[Any] = {int(A_ ): v for k, v in idalabel.items()}
a : Union[str, Any] = idalabel
a : Tuple = {v: k for k, v in idalabel.items()}
else:
a : Union[str, Any] = 1_000
a : Union[str, Any] = "huggingface/label-files"
a : int = "imagenet-1k-id2label.json"
a : Optional[Any] = json.load(open(hf_hub_download(A_ , A_ , repo_type="dataset" ) , "r" ) )
a : Any = {int(A_ ): v for k, v in idalabel.items()}
a : int = idalabel
a : Optional[Any] = {v: k for k, v in idalabel.items()}
a : str = img_size
a : Union[str, Any] = num_classes
a : Union[str, Any] = embed_dim
a : Optional[Any] = depths
a : Optional[int] = num_heads
a : int = window_size
return config
def rename_key( name )-> Union[str, Any]:
    '''simple docstring'''
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" )
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm" , "embeddings.norm" )
    if "layers" in name:
        name = "encoder." + name
    if "attn.proj" in name:
        name = name.replace("attn.proj" , "attention.output.dense" )
    if "attn" in name:
        name = name.replace("attn" , "attention.self" )
    if "norm1" in name:
        name = name.replace("norm1" , "layernorm_before" )
    if "norm2" in name:
        name = name.replace("norm2" , "layernorm_after" )
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1" , "intermediate.dense" )
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2" , "output.dense" )
    if "q_bias" in name:
        name = name.replace("q_bias" , "query.bias" )
    if "k_bias" in name:
        name = name.replace("k_bias" , "key.bias" )
    if "v_bias" in name:
        name = name.replace("v_bias" , "value.bias" )
    if "cpb_mlp" in name:
        name = name.replace("cpb_mlp" , "continuous_position_bias_mlp" )
    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"
    if "head" in name:
        name = name.replace("head" , "classifier" )
    else:
        name = "swinv2." + name
    return name
def convert_state_dict( orig_state_dict , model )-> Union[str, Any]:
'''simple docstring'''
for key in orig_state_dict.copy().keys():
a : List[Any] = orig_state_dict.pop(A_ )
if "mask" in key:
continue
elif "qkv" in key:
a : List[str] = key.split("." )
a : Optional[int] = int(key_split[1] )
a : Union[str, Any] = int(key_split[3] )
a : Tuple = model.swinva.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
a : int = val[:dim, :]
a : Union[str, Any] = val[dim : dim * 2, :]
a : List[str] = val[-dim:, :]
else:
a : str = val[:dim]
                a : str = val[dim : dim * 2]
a : str = val[-dim:]
else:
a : List[Any] = val
return orig_state_dict
def convert_swinva_checkpoint( swinva_name , pytorch_dump_folder_path )-> Optional[int]:
'''simple docstring'''
a : Union[str, Any] = timm.create_model(A_ , pretrained=A_ )
timm_model.eval()
a : str = get_swinva_config(A_ )
a : Dict = SwinvaForImageClassification(A_ )
model.eval()
a : Tuple = convert_state_dict(timm_model.state_dict() , A_ )
model.load_state_dict(A_ )
a : Dict = "http://images.cocodataset.org/val2017/000000039769.jpg"
a : Optional[Any] = AutoImageProcessor.from_pretrained("microsoft/{}".format(swinva_name.replace("_" , "-" ) ) )
a : Optional[int] = Image.open(requests.get(A_ , stream=A_ ).raw )
a : int = image_processor(images=A_ , return_tensors="pt" )
a : Tuple = timm_model(inputs["pixel_values"] )
a : Optional[Any] = model(**A_ ).logits
assert torch.allclose(A_ , A_ , atol=1e-3 )
print(F'''Saving model {swinva_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(A_ )
print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(A_ )
model.push_to_hub(
repo_path_or_name=Path(A_ , A_ ) , organization="nandwalritik" , commit_message="Add model" , )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--swinv2_name""",
default="""swinv2_tiny_patch4_window8_256""",
type=str,
help="""Name of the Swinv2 timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
    args = parser.parse_args()
    convert_swinva_checkpoint(args.swinv2_name, args.pytorch_dump_folder_path)
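# A minimal usage sketch (hypothetical script name and output path; not part of the
# original file):
#   python convert_swinv2_timm_to_pytorch.py \
#       --swinv2_name swinv2_tiny_patch4_window8_256 \
#       --pytorch_dump_folder_path ./swinv2-tiny-patch4-window8-256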
| 40 |
'''simple docstring'''
import json
import os
import unittest
from typing import Tuple
from transformers import WavaVecaPhonemeCTCTokenizer
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.models.wavaveca_phoneme.tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizerOutput
from transformers.testing_utils import require_phonemizer
from ...test_tokenization_common import TokenizerTesterMixin
@require_phonemizer
class lowerCAmelCase_ ( TokenizerTesterMixin , unittest.TestCase ):
'''simple docstring'''
    tokenizer_class = WavaVecaPhonemeCTCTokenizer
    test_rust_tokenizer = False
    def setUp( self ):
'''simple docstring'''
super().setUp()
        vocab = (
'''<s> <pad> </s> <unk> n s t ə l a i k d m ɛ ɾ e ɪ p o ɐ z ð f j v b ɹ ʁ ʊ iː r w ʌ u ɡ æ aɪ ʃ h ɔ ɑː '''
'''ŋ ɚ eɪ β uː y ɑ̃ oʊ ᵻ eː θ aʊ ts oː ɔ̃ ɣ ɜ ɑ dʒ əl x ɜː ç ʒ tʃ ɔː ɑːɹ ɛ̃ ʎ ɔːɹ ʋ aː ɕ œ ø oːɹ ɲ yː '''
'''ʔ iə i5 s. tɕ ?? nʲ ɛː œ̃ ɭ ɔø ʑ tʲ ɨ ɛɹ ts. rʲ ɪɹ ɭʲ i.5 ɔɪ q sʲ u5 ʊɹ iɜ a5 iɛ5 øː ʕ ja əɜ th ɑ5 '''
'''oɪ dʲ ə5 tɕh ts.h mʲ ɯ dʑ vʲ e̞ tʃʲ ei5 o5 onɡ5 ɑu5 iɑ5 ai5 aɪɚ kh ə1 ʐ i2 ʉ ħ t[ aɪə ʲ ju ə2 u2 oɜ '''
'''pː iɛɜ ou5 y5 uɜ tː uo5 d[ uoɜ tsh ɑɜ ɵ i̪5 uei5 ɟ aɜ ɑɨ i.ɜ eʊ o2 ɐ̃ ä pʲ kʲ n̩ ɒ ph ɑu2 uɨ əɪ ɫ ɬ '''
'''yɜ bʲ ɑ2 s̪ aiɜ χ ɐ̃ʊ̃ 1 ə4 yæɜ a2 ɨː t̪ iouɜ ũ onɡɜ aɨ iɛ2 ɔɨ ɑuɜ o̞ ei2 iou2 c kː y2 ɖ oe dˤ yɛɜ '''
'''əʊ S ɡʲ onɡ2 u" eiɜ ʈ ɯᵝ iou5 dZ r̝̊ i.2 tS s^ ʝ yə5 iɑɜ uə5 pf ɨu iɑ2 ou2 ər2 fʲ ai2 r̝ uəɜ ɳ əɨ '''
'''ua5 uɪ ɽ bː yu5 uo2 yɛ5 l̩ ɻ ərɜ ʂ i̪2 ouɜ uaɜ a. a.ː yæ5 dː r̩ ee ɪu ər5 i̪ ɜ æi u: i.ː t^ o1 ɪ^ '''
'''ai ueiɜ æː ɛɪ eə i. ɴ ie ua2 ɑ1 o4 tʃː o: ɑ: u1 N i̪1 au yæ2 u. qː yəɜ y: kʰ tʃʰ iʊ sx õ uo tʰ '''
'''uai5 bʰ u.ː uə2 ʊə d^ s̪ː yiɜ dʰ r. oe: i1 ɟː yu2 nʲʲ i̪4 uei2 tsʲ ɸ ĩ ɑ4 t̪ː eɑ u4 e: tsː ʈʰ ɡʰ '''
'''ɯɯ dʒʲ ʂʲ X ɵː uaiɜ tɕʲ ã t^ː ẽː yɛ2 cː i.1 ɛʊ dˤdˤ dʒː i4 ɡː yi ɕʲ ɟʰ pʰ dʑʲ yuɜ ua1 ua4 æiː ɐɐ '''
'''ui iou1 ʊː a1 iou4 cʰ iɛ1 yə2 ɖʰ ẽ ʒʲ ää ər4 iːː ɪː iɑ1 ər1 œː øi ɪuː cʰcʰ əː1 iː1 ũ kʰː o̞o̞ xʲ '''
'''ou1 iɛ4 e̞e̞ y1 dzː dʲʲ dʰː ɯᵝɯᵝ lː uo1 i.4 i: yɛ5ʲ a4'''
).split(''' ''' )
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        self.special_tokens_map = {'''pad_token''': '''<pad>''', '''unk_token''': '''<unk>''', '''bos_token''': '''<s>''', '''eos_token''': '''</s>'''}
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write(json.dumps(vocab_tokens ) + '''\n''' )
    def get_clean_sequence( self , tokenizer , with_prefix_space=False , max_length=20 , min_length=5 ) -> Tuple[str, list]:
        '''simple docstring'''
        toks = [(i, tokenizer.decode([i] , clean_up_tokenization_spaces=False )) for i in range(len(tokenizer ) )]
        toks = list(filter(lambda t : [t[0]] == tokenizer.encode(t[1] , do_phonemize=False ) , toks ) )
        if max_length is not None and len(toks ) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks ) < min_length and len(toks ) > 0:
            while len(toks ) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]
        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids , clean_up_tokenization_spaces=False )
        if " " not in output_txt and len(toks_ids ) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=False )
                + ''' '''
                + tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=False )
            )
        if with_prefix_space:
            output_txt = ''' ''' + output_txt
        output_ids = tokenizer.encode(output_txt , add_special_tokens=False )
        return output_txt, output_ids
    def get_tokenizer( self , **kwargs ):
        '''simple docstring'''
        kwargs.update(self.special_tokens_map )
        return WavaVecaPhonemeCTCTokenizer.from_pretrained(self.tmpdirname , **kwargs )
def _snake_case ( self : int ) -> Optional[Any]:
'''simple docstring'''
A: List[Any] = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' )
# check adding a single token
tokenizer.add_tokens('''xxx''' )
A: Any = tokenizer('''m xxx ɪ''' , do_phonemize=SCREAMING_SNAKE_CASE_ ).input_ids
self.assertEqual(SCREAMING_SNAKE_CASE_ , [13, 3_92, 17] ) # xxx should be last token
tokenizer.add_tokens(['''aaa''', '''bbb''', '''ccc'''] )
A: Optional[int] = tokenizer('''m aaa ɪ ccc''' , do_phonemize=SCREAMING_SNAKE_CASE_ ).input_ids
self.assertEqual(SCREAMING_SNAKE_CASE_ , [13, 3_93, 17, 3_95] ) # aaa and ccc should be after xxx and 2 after aaa
A: str = tokenizer('''maɪ c''' , do_phonemize=SCREAMING_SNAKE_CASE_ ).input_ids
self.assertEqual(SCREAMING_SNAKE_CASE_ , [3, 2_00] ) # mai should be <unk> (=3)
def _snake_case ( self : int ) -> List[Any]:
'''simple docstring'''
A: Any = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' )
A: Any = '''Hello how are you'''
A: Optional[Any] = tokenizer.phonemize(SCREAMING_SNAKE_CASE_ , phonemizer_lang='''en-us''' )
self.assertEqual(SCREAMING_SNAKE_CASE_ , '''h ə l oʊ h aʊ ɑːɹ j uː''' )
def _snake_case ( self : Tuple ) -> Dict:
'''simple docstring'''
A: str = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' )
A: List[Any] = '''Hello how are you'''
A: Any = tokenizer.phonemize(SCREAMING_SNAKE_CASE_ , phonemizer_lang='''en-us''' )
self.assertEqual(tokenizer(SCREAMING_SNAKE_CASE_ ).input_ids , tokenizer(SCREAMING_SNAKE_CASE_ , do_phonemize=SCREAMING_SNAKE_CASE_ ).input_ids )
def _snake_case ( self : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
A: str = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' )
A: List[str] = '''Hello how are you'''
A: Union[str, Any] = tokenizer.phonemize(SCREAMING_SNAKE_CASE_ , phonemizer_lang='''en-us''' )
A: Union[str, Any] = tokenizer.decode(tokenizer(SCREAMING_SNAKE_CASE_ ).input_ids )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def _snake_case ( self : Dict ) -> Optional[Any]:
'''simple docstring'''
A: Dict = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' )
A: Optional[Any] = [
[11, 5, 15, tokenizer.pad_token_id, 15, 8, 98],
[24, 22, 5, 24, 22, 5, 77],
]
A: List[str] = tokenizer.decode(sample_ids[0] )
A: List[str] = tokenizer.batch_decode(SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , batch_tokens[0] )
self.assertEqual(SCREAMING_SNAKE_CASE_ , ['''k s ɾ ɾ l ɭʲ''', '''j ð s j ð s oːɹ'''] )
def _snake_case ( self : Any ) -> Optional[int]:
'''simple docstring'''
A: int = self.tokenizer_class.from_pretrained(
'''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token='''|''' )
tokenizer.add_tokens('''|''' )
A: List[Any] = '''Hello how are you'''
A: Optional[Any] = tokenizer.phonemize(SCREAMING_SNAKE_CASE_ , phonemizer_lang='''en-us''' )
self.assertEqual(SCREAMING_SNAKE_CASE_ , '''h ə l oʊ | h aʊ | ɑːɹ | j uː |''' )
def _snake_case ( self : List[str] ) -> int:
'''simple docstring'''
A: Optional[Any] = self.tokenizer_class.from_pretrained(
'''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token='''|''' )
tokenizer.add_tokens('''|''' )
A: Optional[Any] = '''Hello how are you'''
A: Any = tokenizer.phonemize(SCREAMING_SNAKE_CASE_ , phonemizer_lang='''en-us''' )
self.assertEqual(tokenizer(SCREAMING_SNAKE_CASE_ ).input_ids , tokenizer(SCREAMING_SNAKE_CASE_ , do_phonemize=SCREAMING_SNAKE_CASE_ ).input_ids )
def _snake_case ( self : Dict ) -> Any:
'''simple docstring'''
A: Optional[int] = self.tokenizer_class.from_pretrained(
'''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token='''|''' )
tokenizer.add_tokens('''|''' )
# fmt: off
A: str = [
[11, 5, 15, tokenizer.pad_token_id, tokenizer.word_delimiter_token_id, 15, 8, tokenizer.word_delimiter_token_id, 98],
[tokenizer.word_delimiter_token_id, 24, 22, tokenizer.word_delimiter_token_id, 5, 24, 22, 5, 77],
]
# fmt: on
# decode with word_del_token filter
A: Tuple = tokenizer.decode(sample_ids[0] )
A: Optional[Any] = tokenizer.batch_decode(SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , batch_tokens[0] )
self.assertEqual(SCREAMING_SNAKE_CASE_ , ['''k s ɾ ɾ l ɭʲ''', '''j ð s j ð s oːɹ'''] )
# decode with no word_del_token filter
A: str = tokenizer.decode(sample_ids[0] , filter_word_delimiter_token=SCREAMING_SNAKE_CASE_ )
A: List[Any] = tokenizer.batch_decode(SCREAMING_SNAKE_CASE_ , filter_word_delimiter_token=SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , batch_tokens[0] )
self.assertEqual(SCREAMING_SNAKE_CASE_ , ['''k s ɾ | ɾ l | ɭʲ''', '''| j ð | s j ð s oːɹ'''] )
def _snake_case ( self : int ) -> List[str]:
'''simple docstring'''
A: Dict = self.tokenizer_class.from_pretrained(
'''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token='''|''' )
tokenizer.add_tokens('''|''' )
A: Union[str, Any] = '''Hello how are you'''
A: Tuple = tokenizer.phonemize(SCREAMING_SNAKE_CASE_ , phonemizer_lang='''en-us''' )
A: Any = tokenizer.decode(tokenizer(SCREAMING_SNAKE_CASE_ ).input_ids , filter_word_delimiter_token=SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def _snake_case ( self : List[str] ) -> Any:
'''simple docstring'''
A: Dict = self.tokenizer_class.from_pretrained(
'''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token='''|''' )
tokenizer.add_tokens('''|''' )
A: Any = '''Hello how are you'''
A: List[Any] = tokenizer.phonemize(SCREAMING_SNAKE_CASE_ , phonemizer_lang='''en-us''' )
A: List[Any] = tokenizer.decode(tokenizer(SCREAMING_SNAKE_CASE_ ).input_ids , filter_word_delimiter_token=SCREAMING_SNAKE_CASE_ )
self.assertEqual(''' '''.join([p.strip() for p in phonemes.split(''' |''' )] ).strip() , SCREAMING_SNAKE_CASE_ )
def _snake_case ( self : List[str] ) -> Optional[Any]:
'''simple docstring'''
A: List[str] = self.tokenizer_class.from_pretrained(
'''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token=SCREAMING_SNAKE_CASE_ )
A: List[Any] = '''Hello how are you'''
A: List[str] = tokenizer(SCREAMING_SNAKE_CASE_ , phonemizer_lang='''en-us''' ).input_ids
A: Tuple = tokenizer(SCREAMING_SNAKE_CASE_ , phonemizer_lang='''fr-fr''' ).input_ids
self.assertNotEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
A: Tuple = tokenizer.decode(SCREAMING_SNAKE_CASE_ )
A: Any = tokenizer.decode(SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , '''h ə l oʊ h aʊ ɑːɹ j uː''' )
self.assertEqual(SCREAMING_SNAKE_CASE_ , '''ɛ l o h aʊ a ʁ j u''' )
def _snake_case ( self : str ) -> str:
'''simple docstring'''
A: str = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' )
A: str = '''Hello how Are you'''
A: Union[str, Any] = '''hello how are you'''
A: List[str] = tokenizer(SCREAMING_SNAKE_CASE_ ).input_ids
A: str = tokenizer(SCREAMING_SNAKE_CASE_ ).input_ids
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def _snake_case ( self : int ) -> List[Any]:
'''simple docstring'''
A: Union[str, Any] = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' )
tokenizer.add_tokens(['''!''', '''?'''] )
tokenizer.add_special_tokens({'''cls_token''': '''$$$'''} )
# fmt: off
A: Tuple = [
[11, 5, 15, tokenizer.pad_token_id, 15, 8, 98, 3_92, 3_92, 3_93, 3_92, 3_92, 3_93, 3_94, 3_94],
[24, 22, 5, 24, 22, 5, 77, tokenizer.pad_token_id, 3_94, 3_94],
]
# fmt: on
A: List[Any] = tokenizer.batch_decode(SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , ['''k s ɾ ɾ l ɭʲ!?!? $$$''', '''j ð s j ð s oːɹ $$$'''] )
@staticmethod
    def get_from_offsets( offsets , key ):
        '''simple docstring'''
        retrieved_list = [d[key] for d in offsets]
        return retrieved_list
def _snake_case ( self : Any ) -> Tuple:
'''simple docstring'''
        tokenizer = self.get_tokenizer(word_delimiter_token='''|''' )
tokenizer.add_tokens('''|''' )
# fmt: off
# ksssɾɾ|ɾɾ<pad>ɾɾ|<pad>ɾlll|ɭʲ -> k s ɾ ɾ | ɾ l | ɭʲ"
        sample_ids = [11, 5, 5, 5, 15, 15, tokenizer.pad_token_id, 15, 15, tokenizer.word_delimiter_token_id, tokenizer.pad_token_id, 15, 8, 8, 8, tokenizer.word_delimiter_token_id, 98]
# fmt: on
        outputs = tokenizer.decode(sample_ids , output_char_offsets=True , filter_word_delimiter_token=False )
# check Wav2Vec2CTCTokenizerOutput keys for char
self.assertEqual(len(outputs.keys() ) , 2 )
self.assertTrue('''text''' in outputs )
self.assertTrue('''char_offsets''' in outputs )
        self.assertTrue(isinstance(outputs , WavaVecaPhonemeCTCTokenizerOutput ) )
# check that order of chars is correct and identical for both outputs
self.assertEqual(''' '''.join(self.get_from_offsets(outputs['''char_offsets'''] , '''char''' ) ) , outputs.text )
self.assertListEqual(
self.get_from_offsets(outputs['''char_offsets'''] , '''char''' ) , ['''k''', '''s''', '''ɾ''', '''ɾ''', '''|''', '''ɾ''', '''l''', '''|''', '''ɭʲ'''] )
# check that offsets are actually correct for char
# 0-1 is 11, 1-4 is 5, 4-6 is first 15, 6-7 is <pad> (thus not shown), 7-9 is second 15, 9-10 is word_delimiter_token,
# 10-11 is <pad> (thus not shown), 11-12 is third 15, 12-15 is 8, 15-16 is word_delimiter_token, 16-17 is 98
self.assertListEqual(
self.get_from_offsets(outputs['''char_offsets'''] , '''start_offset''' ) , [0, 1, 4, 7, 9, 11, 12, 15, 16] )
self.assertListEqual(
self.get_from_offsets(outputs['''char_offsets'''] , '''end_offset''' ) , [1, 4, 6, 9, 10, 12, 15, 16, 17] )
def _snake_case ( self : Any ) -> List[Any]:
'''simple docstring'''
        tokenizer = self.get_tokenizer(word_delimiter_token='''|''' )
        def check_list_tuples_equal(outputs_batch , outputs_list ):
            self.assertTrue(isinstance(outputs_batch , WavaVecaPhonemeCTCTokenizerOutput ) )
            self.assertTrue(isinstance(outputs_list[0] , WavaVecaPhonemeCTCTokenizerOutput ) )
            # transform list to ModelOutput
            outputs_batch_a = WavaVecaPhonemeCTCTokenizerOutput(
                {k: [d[k] for d in outputs_list] for k in outputs_list[0]} )
            self.assertListEqual(outputs_batch['''text'''] , outputs_batch_a['''text'''] )
            def recursive_check(la , lb ):
                if isinstance(la , list ):
                    [recursive_check(sub_a , sub_b ) for sub_a, sub_b in zip(la , lb )]
                self.assertEqual(la , lb )
            if "char_offsets" in outputs_batch:
                recursive_check(outputs_batch['''char_offsets'''] , outputs_batch_a['''char_offsets'''] )
# fmt: off
        sample_ids = [
[11, 5, 15, tokenizer.pad_token_id, 15, 4, 8, 98, 32, 32, 32, 32, 4, 33, tokenizer.word_delimiter_token_id, 32, 32, 33, 34, 34],
[24, 22, 5, tokenizer.word_delimiter_token_id, tokenizer.word_delimiter_token_id, 24, 22, 22, 22, 4, 5, 77, tokenizer.pad_token_id, 22, 22, 4, 34, 34, 34, 34],
]
# fmt: on
# We assume that `decode` works as expected. All we will check now is
# the output type is correct and the output is identical to `decode`
# char
        outputs_char_batch = tokenizer.batch_decode(sample_ids , output_char_offsets=True )
        outputs_char = [tokenizer.decode(ids , output_char_offsets=True ) for ids in sample_ids]
        check_list_tuples_equal(outputs_char_batch , outputs_char )
@unittest.skip('''Wav2Vec2PhonemeTokenizer always lower cases letters to correctly map to phonemes''' )
def _snake_case ( self : int ) -> int:
'''simple docstring'''
pass
@unittest.skip('''Wav2Vec2PhonemeTokenizer always puts spaces between phonemes''' )
def _snake_case ( self : str ) -> Any:
'''simple docstring'''
pass
@unittest.skip('''encodes to text to ids, but decodes ids to phonemes -> not possible to have internal consistency''' )
def _snake_case ( self : List[str] ) -> List[str]:
'''simple docstring'''
pass
@unittest.skip('''Wav2Vec2PhonemeModel has no max model length => no testing''' )
def _snake_case ( self : Dict ) -> List[Any]:
'''simple docstring'''
pass
def _snake_case ( self : Tuple ) -> Any:
'''simple docstring'''
A: Any = self.get_tokenizers(do_lower_case=SCREAMING_SNAKE_CASE_ )
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
A: str = tokenizer.vocab_size
A: str = len(SCREAMING_SNAKE_CASE_ )
self.assertNotEqual(SCREAMING_SNAKE_CASE_ , 0 )
# We usually have added tokens from the start in tests because our vocab fixtures are
# smaller than the original vocabs - let's not assert this
# self.assertEqual(vocab_size, all_size)
A: List[Any] = ['''aaaaa bbbbbb''', '''cccccccccdddddddd''']
A: List[Any] = tokenizer.add_tokens(SCREAMING_SNAKE_CASE_ )
A: Optional[Any] = tokenizer.vocab_size
A: Union[str, Any] = len(SCREAMING_SNAKE_CASE_ )
self.assertNotEqual(SCREAMING_SNAKE_CASE_ , 0 )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , len(SCREAMING_SNAKE_CASE_ ) )
self.assertEqual(SCREAMING_SNAKE_CASE_ , all_size + len(SCREAMING_SNAKE_CASE_ ) )
A: Any = tokenizer.encode('''aaaaa bbbbbb low cccccccccdddddddd l''' , add_special_tokens=SCREAMING_SNAKE_CASE_ )
self.assertGreaterEqual(len(SCREAMING_SNAKE_CASE_ ) , 4 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
A: str = {'''eos_token''': '''>>>>|||<||<<|<<''', '''pad_token''': '''<<<<<|||>|>>>>|>'''}
A: int = tokenizer.add_special_tokens(SCREAMING_SNAKE_CASE_ )
A: Optional[Any] = tokenizer.vocab_size
A: Optional[Any] = len(SCREAMING_SNAKE_CASE_ )
self.assertNotEqual(SCREAMING_SNAKE_CASE_ , 0 )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , len(SCREAMING_SNAKE_CASE_ ) )
self.assertEqual(SCREAMING_SNAKE_CASE_ , all_size_a + len(SCREAMING_SNAKE_CASE_ ) )
A: int = tokenizer.encode(
'''>>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l''' , add_special_tokens=SCREAMING_SNAKE_CASE_ )
self.assertGreaterEqual(len(SCREAMING_SNAKE_CASE_ ) , 6 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[0] , tokens[1] )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokens[-4] )
self.assertEqual(tokens[0] , tokenizer.eos_token_id )
self.assertEqual(tokens[-3] , tokenizer.pad_token_id )
@unittest.skip('''The tokenizer shouldn\'t be used to encode input IDs (except for labels), only to decode.''' )
def _snake_case ( self : List[Any] ) -> Optional[Any]:
'''simple docstring'''
pass
@unittest.skip('''The tokenizer shouldn\'t be used to encode input IDs (except for labels), only to decode.''' )
def _snake_case ( self : Tuple ) -> Optional[Any]:
'''simple docstring'''
pass
def _snake_case ( self : str ) -> Tuple:
'''simple docstring'''
A: List[Any] = self.get_tokenizers(fast=SCREAMING_SNAKE_CASE_ , do_lower_case=SCREAMING_SNAKE_CASE_ )
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
A: Union[str, Any] = ['''ð''', '''ɪ''', '''s''', '''ɪ''', '''z''', '''ɐ''', '''t''', '''ɛ''', '''k''', '''s''', '''t''']
A: Union[str, Any] = tokenizer.convert_tokens_to_string(SCREAMING_SNAKE_CASE_ )
self.assertIsInstance(output['''text'''] , SCREAMING_SNAKE_CASE_ )
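# Outside the test harness, the decode-with-offsets behaviour exercised above looks like
# this; a sketch kept in comments, assuming the phonemizer backend is installed:
#
#   from transformers import WavaVecaPhonemeCTCTokenizer
#
#   tok = WavaVecaPhonemeCTCTokenizer.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")
#   ids = tok("Hello how are you").input_ids          # text is phonemized, then mapped to ids
#   out = tok.decode(ids, output_char_offsets=True)   # ModelOutput with .text and .char_offsets
#   print(out.text)                                   # "h ə l oʊ h aʊ ɑːɹ j uː"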
| 319 | 0 |
'''simple docstring'''
import copy
import random
from transformers import CLIPTokenizer
class _lowercase ( CLIPTokenizer ):
    def __init__( self , *args , **kwargs ):
        super().__init__(*args , **kwargs )
        self.token_map = {}
    def try_adding_tokens( self , placeholder_token , *args , **kwargs ):
        num_added_tokens = super().add_tokens(placeholder_token , *args , **kwargs )
        if num_added_tokens == 0:
            raise ValueError(
                F'''The tokenizer already contains the token {placeholder_token}. Please pass a different'''
                """ `placeholder_token` that is not already in the tokenizer.""" )
    def add_placeholder_tokens( self , placeholder_token , *args , num_vec_per_token=1 , **kwargs ):
        output = []
        if num_vec_per_token == 1:
            self.try_adding_tokens(placeholder_token , *args , **kwargs )
            output.append(placeholder_token )
        else:
            output = []
            for i in range(num_vec_per_token ):
                ith_token = placeholder_token + F'''_{i}'''
                self.try_adding_tokens(ith_token , *args , **kwargs )
                output.append(ith_token )
        # handle cases where there is a new placeholder token that contains the current placeholder token but is larger
        for token in self.token_map:
            if token in placeholder_token:
                raise ValueError(
                    F'''The tokenizer already has placeholder token {token} that can get confused with'''
                    F''' {placeholder_token}; keep placeholder tokens independent''' )
        self.token_map[placeholder_token] = output
    def replace_placeholder_tokens_in_text( self , text , vector_shuffle=False , prop_tokens_to_load=1.0 ):
        if isinstance(text , list ):
            output = []
            for i in range(len(text ) ):
                output.append(self.replace_placeholder_tokens_in_text(text[i] , vector_shuffle=vector_shuffle ) )
            return output
        for placeholder_token in self.token_map:
            if placeholder_token in text:
                tokens = self.token_map[placeholder_token]
                tokens = tokens[: 1 + int(len(tokens ) * prop_tokens_to_load )]
                if vector_shuffle:
                    tokens = copy.copy(tokens )
                    random.shuffle(tokens )
                text = text.replace(placeholder_token , ''' '''.join(tokens ) )
        return text
    def __call__( self , text , *args , vector_shuffle=False , prop_tokens_to_load=1.0 , **kwargs ):
        return super().__call__(
            self.replace_placeholder_tokens_in_text(
                text , vector_shuffle=vector_shuffle , prop_tokens_to_load=prop_tokens_to_load ) , *args , **kwargs , )
    def encode( self , text , *args , vector_shuffle=False , prop_tokens_to_load=1.0 , **kwargs ):
        return super().encode(
            self.replace_placeholder_tokens_in_text(
                text , vector_shuffle=vector_shuffle , prop_tokens_to_load=prop_tokens_to_load ) , *args , **kwargs , )
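# For intuition, the placeholder expansion implemented above, extracted as a standalone
# sketch (illustrative names; independent of the tokenizer class): a placeholder such as
# "<cat>" is replaced by its N sub-tokens before tokenization.
def _expand_placeholder_sketch(text, token_map, vector_shuffle=False, prop_tokens_to_load=1.0):
    # token_map maps e.g. "<cat>" -> ["<cat>_0", ..., "<cat>_{N-1}"]
    for placeholder_token, tokens in token_map.items():
        if placeholder_token in text:
            tokens = tokens[: 1 + int(len(tokens) * prop_tokens_to_load)]
            if vector_shuffle:
                tokens = copy.copy(tokens)
                random.shuffle(tokens)
            text = text.replace(placeholder_token, " ".join(tokens))
    return text
# _expand_placeholder_sketch("a photo of <cat>", {"<cat>": ["<cat>_0", "<cat>_1"]})
# -> "a photo of <cat>_0 <cat>_1"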
| 41 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
logger = logging.get_logger(__name__)
class BeitFeatureExtractor ( BeitImageProcessor ):
    '''simple docstring'''
    def __init__( self , *args , **kwargs ) -> None:
        '''simple docstring'''
        warnings.warn(
            '''The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
            ''' use BeitImageProcessor instead.''' , FutureWarning , )
        super().__init__(*args , **kwargs )
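# Migration is a drop-in rename; a minimal sketch (the checkpoint name is illustrative):
#
#   # before (deprecated, emits a FutureWarning):
#   extractor = BeitFeatureExtractor.from_pretrained("microsoft/beit-base-patch16-224")
#   # after (preferred):
#   processor = BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224")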
| 319 | 0 |
'''simple docstring'''
def count_divisors( n ) -> int:
    n_divisors = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        n_divisors *= multiplicity + 1
        i += 1
    if n > 1:
        n_divisors *= 2
    return n_divisors
def solution( ) -> int:
    i = 1
    t_num = 1
    while True:
        i += 1
        t_num += i
        if count_divisors(t_num ) > 500:
            break
    return t_num
if __name__ == "__main__":
    print(solution())
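    # Quick sanity checks (illustrative): 28 = 2^2 * 7 has (2+1) * (1+1) = 6 divisors,
    # and it is the first triangular number with more than five divisors.
    assert count_divisors(28) == 6
    assert count_divisors(1) == 1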
| 42 |
'''simple docstring'''
import os
import pytest
from transformers.dynamic_module_utils import get_imports
TOP_LEVEL_IMPORT = '''
import os
'''
IMPORT_IN_FUNCTION = '''
def foo():
import os
return False
'''
DEEPLY_NESTED_IMPORT = '''
def foo():
def bar():
if True:
import os
return False
return bar()
'''
TOP_LEVEL_TRY_IMPORT = '''
import os
try:
import bar
except ImportError:
raise ValueError()
'''
TRY_IMPORT_IN_FUNCTION = '''
import os
def foo():
try:
import bar
except ImportError:
raise ValueError()
'''
MULTIPLE_EXCEPTS_IMPORT = '''
import os
try:
import bar
except (ImportError, AttributeError):
raise ValueError()
'''
EXCEPT_AS_IMPORT = '''
import os
try:
import bar
except ImportError as e:
raise ValueError()
'''
GENERIC_EXCEPT_IMPORT = '''
import os
try:
import bar
except:
raise ValueError()
'''
MULTILINE_TRY_IMPORT = '''
import os
try:
import bar
import baz
except ImportError:
raise ValueError()
'''
MULTILINE_BOTH_IMPORT = '''
import os
try:
import bar
import baz
except ImportError:
x = 1
raise ValueError()
'''
CASES = [
TOP_LEVEL_IMPORT,
IMPORT_IN_FUNCTION,
DEEPLY_NESTED_IMPORT,
TOP_LEVEL_TRY_IMPORT,
GENERIC_EXCEPT_IMPORT,
MULTILINE_TRY_IMPORT,
MULTILINE_BOTH_IMPORT,
MULTIPLE_EXCEPTS_IMPORT,
EXCEPT_AS_IMPORT,
TRY_IMPORT_IN_FUNCTION,
]
@pytest.mark.parametrize('''case''' , CASES )
def test_import_parsing( tmp_path , case ):
    tmp_file_path = os.path.join(tmp_path , '''test_file.py''' )
    with open(tmp_file_path , '''w''' ) as _tmp_file:
        _tmp_file.write(case )
    parsed_imports = get_imports(tmp_file_path )
    assert parsed_imports == ["os"]
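# The behaviour under test, spelled out: `get_imports` reports only hard (unguarded)
# dependencies, so anything behind `try: ... except ImportError:` is treated as optional
# and skipped. A sketch kept in comments (hypothetical temp path):
#
#   with open("/tmp/snippet.py", "w") as f:
#       f.write(TOP_LEVEL_TRY_IMPORT)
#   assert get_imports("/tmp/snippet.py") == ["os"]   # "bar" is optional, not reported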
| 319 | 0 |
def join( separator , separated ):
    '''simple docstring'''
    joined = ""
    for word_or_phrase in separated:
        if not isinstance(word_or_phrase , str ):
            raise Exception('''join() accepts only strings to be joined''' )
        joined += word_or_phrase + separator
    return joined.strip(separator )
if __name__ == "__main__":
from doctest import testmod
testmod()
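    # Quick sanity checks (illustrative):
    assert join("", ["a", "b", "c"]) == "abc"
    assert join(" ", ["You", "are", "amazing!"]) == "You are amazing!"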
| 43 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
BertTokenizer,
ViltConfig,
ViltForImageAndTextRetrieval,
ViltForImagesAndTextClassification,
ViltForMaskedLM,
ViltForQuestionAnswering,
ViltImageProcessor,
ViltProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys( config , vqa_model=False , nlvr_model=False , irtr_model=False ):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"""transformer.blocks.{i}.norm1.weight""", F"""vilt.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""transformer.blocks.{i}.norm1.bias""", F"""vilt.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append(
(F"""transformer.blocks.{i}.attn.proj.weight""", F"""vilt.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append(
(F"""transformer.blocks.{i}.attn.proj.bias""", F"""vilt.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((F"""transformer.blocks.{i}.norm2.weight""", F"""vilt.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""transformer.blocks.{i}.norm2.bias""", F"""vilt.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append(
(F"""transformer.blocks.{i}.mlp.fc1.weight""", F"""vilt.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((F"""transformer.blocks.{i}.mlp.fc1.bias""", F"""vilt.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((F"""transformer.blocks.{i}.mlp.fc2.weight""", F"""vilt.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""transformer.blocks.{i}.mlp.fc2.bias""", F"""vilt.encoder.layer.{i}.output.dense.bias""") )
# embeddings
rename_keys.extend(
[
# text embeddings
('''text_embeddings.word_embeddings.weight''', '''vilt.embeddings.text_embeddings.word_embeddings.weight'''),
(
'''text_embeddings.position_embeddings.weight''',
'''vilt.embeddings.text_embeddings.position_embeddings.weight''',
),
('''text_embeddings.position_ids''', '''vilt.embeddings.text_embeddings.position_ids'''),
(
'''text_embeddings.token_type_embeddings.weight''',
'''vilt.embeddings.text_embeddings.token_type_embeddings.weight''',
),
('''text_embeddings.LayerNorm.weight''', '''vilt.embeddings.text_embeddings.LayerNorm.weight'''),
('''text_embeddings.LayerNorm.bias''', '''vilt.embeddings.text_embeddings.LayerNorm.bias'''),
# patch embeddings
('''transformer.cls_token''', '''vilt.embeddings.cls_token'''),
('''transformer.patch_embed.proj.weight''', '''vilt.embeddings.patch_embeddings.projection.weight'''),
('''transformer.patch_embed.proj.bias''', '''vilt.embeddings.patch_embeddings.projection.bias'''),
('''transformer.pos_embed''', '''vilt.embeddings.position_embeddings'''),
# token type embeddings
('''token_type_embeddings.weight''', '''vilt.embeddings.token_type_embeddings.weight'''),
] )
# final layernorm + pooler
rename_keys.extend(
[
('''transformer.norm.weight''', '''vilt.layernorm.weight'''),
('''transformer.norm.bias''', '''vilt.layernorm.bias'''),
('''pooler.dense.weight''', '''vilt.pooler.dense.weight'''),
('''pooler.dense.bias''', '''vilt.pooler.dense.bias'''),
] )
# classifier head(s)
if vqa_model:
# classification head
rename_keys.extend(
[
('''vqa_classifier.0.weight''', '''classifier.0.weight'''),
('''vqa_classifier.0.bias''', '''classifier.0.bias'''),
('''vqa_classifier.1.weight''', '''classifier.1.weight'''),
('''vqa_classifier.1.bias''', '''classifier.1.bias'''),
('''vqa_classifier.3.weight''', '''classifier.3.weight'''),
('''vqa_classifier.3.bias''', '''classifier.3.bias'''),
] )
elif nlvr_model:
# classification head
rename_keys.extend(
[
('''nlvr2_classifier.0.weight''', '''classifier.0.weight'''),
('''nlvr2_classifier.0.bias''', '''classifier.0.bias'''),
('''nlvr2_classifier.1.weight''', '''classifier.1.weight'''),
('''nlvr2_classifier.1.bias''', '''classifier.1.bias'''),
('''nlvr2_classifier.3.weight''', '''classifier.3.weight'''),
('''nlvr2_classifier.3.bias''', '''classifier.3.bias'''),
] )
else:
pass
return rename_keys
def read_in_q_k_v( state_dict , config ):
    for i in range(config.num_hidden_layers ):
        prefix = '''vilt.'''
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F"""transformer.blocks.{i}.attn.qkv.weight""" )
        in_proj_bias = state_dict.pop(F"""transformer.blocks.{i}.attn.qkv.bias""" )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[prefix + F"""encoder.layer.{i}.attention.attention.query.weight"""] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[prefix + F"""encoder.layer.{i}.attention.attention.query.bias"""] = in_proj_bias[: config.hidden_size]
        state_dict[prefix + F"""encoder.layer.{i}.attention.attention.key.weight"""] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[prefix + F"""encoder.layer.{i}.attention.attention.key.bias"""] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[prefix + F"""encoder.layer.{i}.attention.attention.value.weight"""] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[prefix + F"""encoder.layer.{i}.attention.attention.value.bias"""] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_( state_dict ):
    ignore_keys = ['''head.weight''', '''head.bias''']
    for k in ignore_keys:
        state_dict.pop(k , None )
def rename_key( dct , old , new ):
    val = dct.pop(old )
    dct[new] = val
@torch.no_grad()
def convert_vilt_checkpoint( checkpoint_url , pytorch_dump_folder_path ):
    config = ViltConfig(image_size=3_8_4 , patch_size=3_2 , tie_word_embeddings=False )
    mlm_model = False
    vqa_model = False
    nlvr_model = False
    irtr_model = False
    if "vqa" in checkpoint_url:
        vqa_model = True
        config.num_labels = 3_1_2_9
        repo_id = '''huggingface/label-files'''
        filename = '''vqa2-id2label.json'''
        idalabel = json.load(open(hf_hub_download(repo_id , filename , repo_type='''dataset''' ) , '''r''' ) )
        idalabel = {int(k): v for k, v in idalabel.items()}
        config.idalabel = idalabel
        config.labelaid = {v: k for k, v in idalabel.items()}
        model = ViltForQuestionAnswering(config )
    elif "nlvr" in checkpoint_url:
        nlvr_model = True
        config.num_labels = 2
        config.idalabel = {0: '''False''', 1: '''True'''}
        config.labelaid = {v: k for k, v in config.idalabel.items()}
        config.modality_type_vocab_size = 3
        model = ViltForImagesAndTextClassification(config )
    elif "irtr" in checkpoint_url:
        irtr_model = True
        model = ViltForImageAndTextRetrieval(config )
    elif "mlm_itm" in checkpoint_url:
        mlm_model = True
        model = ViltForMaskedLM(config )
    else:
        raise ValueError('''Unknown model type''' )
    # load state_dict of original model, remove and rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location='''cpu''' )['''state_dict''']
    rename_keys = create_rename_keys(config , vqa_model , nlvr_model , irtr_model )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    read_in_q_k_v(state_dict , config )
    if mlm_model or irtr_model:
        ignore_keys = ['''itm_score.fc.weight''', '''itm_score.fc.bias''']
        for k in ignore_keys:
            state_dict.pop(k , None )
    # load state dict into HuggingFace model
    model.eval()
    if mlm_model:
        missing_keys, unexpected_keys = model.load_state_dict(state_dict , strict=False )
        assert missing_keys == ["mlm_score.decoder.bias"]
    else:
        model.load_state_dict(state_dict )
    # Define processor
    image_processor = ViltImageProcessor(size=3_8_4 )
    tokenizer = BertTokenizer.from_pretrained('''bert-base-uncased''' )
    processor = ViltProcessor(image_processor , tokenizer )
    # Forward pass on example inputs (image + text)
    if nlvr_model:
        image_a = Image.open(requests.get('''https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg''' , stream=True ).raw )
        image_b = Image.open(requests.get('''https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg''' , stream=True ).raw )
        text = (
            '''The left image contains twice the number of dogs as the right image, and at least two dogs in total are'''
            ''' standing.'''
        )
        encoding_a = processor(image_a , text , return_tensors='''pt''' )
        encoding_b = processor(image_b , text , return_tensors='''pt''' )
        outputs = model(
            input_ids=encoding_a.input_ids , pixel_values=encoding_a.pixel_values , pixel_values_a=encoding_b.pixel_values , )
    else:
        image = Image.open(requests.get('''http://images.cocodataset.org/val2017/000000039769.jpg''' , stream=True ).raw )
        if mlm_model:
            text = '''a bunch of [MASK] laying on a [MASK].'''
        else:
            text = '''How many cats are there?'''
        encoding = processor(image , text , return_tensors='''pt''' )
        outputs = model(**encoding )
    # Verify outputs
    if mlm_model:
        expected_shape = torch.Size([1, 1_1, 3_0_5_2_2] )
        expected_slice = torch.tensor([-1_2.5_0_6_1, -1_2.5_1_2_3, -1_2.5_1_7_4] )
        assert outputs.logits.shape == expected_shape
        assert torch.allclose(outputs.logits[0, 0, :3] , expected_slice , atol=1E-4 )
        # verify masked token prediction equals "cats"
        predicted_id = outputs.logits[0, 4, :].argmax(-1 ).item()
        assert tokenizer.decode([predicted_id] ) == "cats"
    elif vqa_model:
        expected_shape = torch.Size([1, 3_1_2_9] )
        expected_slice = torch.tensor([-1_5.9_4_9_5, -1_8.1_4_7_2, -1_0.3_0_4_1] )
        assert torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1E-4 )
        assert outputs.logits.shape == expected_shape
        # verify vqa prediction equals "2"
        predicted_idx = outputs.logits.argmax(-1 ).item()
        assert model.config.idalabel[predicted_idx] == "2"
    elif nlvr_model:
        expected_shape = torch.Size([1, 2] )
        expected_slice = torch.tensor([-2.8_7_2_1, 2.1_2_9_1] )
        assert torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1E-4 )
        assert outputs.logits.shape == expected_shape
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(F"""Saving model and processor to {pytorch_dump_folder_path}""" )
    model.save_pretrained(pytorch_dump_folder_path )
    processor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt''',
type=str,
help='''URL of the checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
    args = parser.parse_args()
convert_vilt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
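# A minimal usage sketch (hypothetical script name and output path; not part of the
# original file):
#   python convert_vilt_original_to_pytorch.py \
#       --checkpoint_url https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt \
#       --pytorch_dump_folder_path ./vilt-b32-mlm-itm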
| 319 | 0 |
"""simple docstring"""
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')
require_version('datasets>=1.8.0', 'To fix: pip install -r examples/pytorch/text-classification/requirements.txt')
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    max_seq_length : Optional[int] = field(
        default=128 , metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        } , )
    overwrite_cache : bool = field(
        default=False , metadata={"help": "Overwrite the cached preprocessed datasets or not."} )
    pad_to_max_length : bool = field(
        default=True , metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        } , )
    max_train_samples : Optional[int] = field(
        default=None , metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        } , )
    max_eval_samples : Optional[int] = field(
        default=None , metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        } , )
    max_predict_samples : Optional[int] = field(
        default=None , metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of prediction examples to this "
                "value if set."
            )
        } , )
@dataclass
class ModelArguments:
    model_name_or_path : str = field(
        default=None , metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
    language : str = field(
        default=None , metadata={"help": "Evaluation language. Also train language if `train_language` is set to None."} )
    train_language : Optional[str] = field(
        default=None , metadata={"help": "Train language if it is different from the evaluation language."} )
    config_name : Optional[str] = field(
        default=None , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
    tokenizer_name : Optional[str] = field(
        default=None , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
    cache_dir : Optional[str] = field(
        default=None , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
    do_lower_case : Optional[bool] = field(
        default=False , metadata={"help": "arg to indicate if tokenizer should do lower case in AutoTokenizer.from_pretrained()"} , )
    use_fast_tokenizer : bool = field(
        default=True , metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."} , )
    model_revision : str = field(
        default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , )
    use_auth_token : bool = field(
        default=False , metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        } , )
    ignore_mismatched_sizes : bool = field(
        default=False , metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."} , )
def main( ):
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("""run_xnli""" , model_args )
    # Setup logging
    logging.basicConfig(
        format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level )
    datasets.utils.logging.set_verbosity(log_level )
    transformers.utils.logging.set_verbosity(log_level )
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()
    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        + f"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}" )
    logger.info(f"Training/evaluation parameters {training_args}" )
    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir )
        if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                """Use --overwrite_output_dir to overcome.""" )
        elif last_checkpoint is not None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                """the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
    # Set seed before initializing model.
    set_seed(training_args.seed )
    # In distributed training, the load_dataset function guarantees that only one local process can concurrently
    # download the dataset.
    # Downloading and loading xnli dataset from the hub.
    if training_args.do_train:
        if model_args.train_language is None:
            train_dataset = load_dataset(
                """xnli""" , model_args.language , split="""train""" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
        else:
            train_dataset = load_dataset(
                """xnli""" , model_args.train_language , split="""train""" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
        label_list = train_dataset.features["""label"""].names
    if training_args.do_eval:
        eval_dataset = load_dataset(
            """xnli""" , model_args.language , split="""validation""" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
        label_list = eval_dataset.features["""label"""].names
    if training_args.do_predict:
        predict_dataset = load_dataset(
            """xnli""" , model_args.language , split="""test""" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
        label_list = predict_dataset.features["""label"""].names
    # Labels
    num_labels = len(label_list )
    # Load pretrained model and tokenizer
    # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=num_labels , idalabel={str(i ): label for i, label in enumerate(label_list )} , labelaid={label: i for i, label in enumerate(label_list )} , finetuning_task="""xnli""" , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , do_lower_case=model_args.do_lower_case , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    model = AutoModelForSequenceClassification.from_pretrained(
        model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
    # Preprocessing the datasets
    # Padding strategy
    if data_args.pad_to_max_length:
        padding = """max_length"""
    else:
        # We will pad later, dynamically at batch creation, to the max sequence length in each batch
        padding = False
    def preprocess_function(examples ):
        # Tokenize the texts
        return tokenizer(
            examples["""premise"""] , examples["""hypothesis"""] , padding=padding , max_length=data_args.max_seq_length , truncation=True , )
    if training_args.do_train:
        if data_args.max_train_samples is not None:
            max_train_samples = min(len(train_dataset ) , data_args.max_train_samples )
            train_dataset = train_dataset.select(range(max_train_samples ) )
        with training_args.main_process_first(desc="""train dataset map pre-processing""" ):
            train_dataset = train_dataset.map(
                preprocess_function , batched=True , load_from_cache_file=not data_args.overwrite_cache , desc="""Running tokenizer on train dataset""" , )
        # Log a few random samples from the training set:
        for index in random.sample(range(len(train_dataset ) ) , 3 ):
            logger.info(f"Sample {index} of the training set: {train_dataset[index]}." )
    if training_args.do_eval:
        if data_args.max_eval_samples is not None:
            max_eval_samples = min(len(eval_dataset ) , data_args.max_eval_samples )
            eval_dataset = eval_dataset.select(range(max_eval_samples ) )
        with training_args.main_process_first(desc="""validation dataset map pre-processing""" ):
            eval_dataset = eval_dataset.map(
                preprocess_function , batched=True , load_from_cache_file=not data_args.overwrite_cache , desc="""Running tokenizer on validation dataset""" , )
    if training_args.do_predict:
        if data_args.max_predict_samples is not None:
            max_predict_samples = min(len(predict_dataset ) , data_args.max_predict_samples )
            predict_dataset = predict_dataset.select(range(max_predict_samples ) )
        with training_args.main_process_first(desc="""prediction dataset map pre-processing""" ):
            predict_dataset = predict_dataset.map(
                preprocess_function , batched=True , load_from_cache_file=not data_args.overwrite_cache , desc="""Running tokenizer on prediction dataset""" , )
    # Get the metric function
    metric = evaluate.load("""xnli""" )
    # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
    # predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p: EvalPrediction ):
        preds = p.predictions[0] if isinstance(p.predictions , tuple ) else p.predictions
        preds = np.argmax(preds , axis=1 )
        return metric.compute(predictions=preds , references=p.label_ids )
    # Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
    if data_args.pad_to_max_length:
        data_collator = default_data_collator
    elif training_args.fpaa:
        data_collator = DataCollatorWithPadding(tokenizer , pad_to_multiple_of=8 )
    else:
        data_collator = None
    # Initialize our Trainer
    trainer = Trainer(
        model=model , args=training_args , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=compute_metrics , tokenizer=tokenizer , data_collator=data_collator , )
    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint )
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset )
        )
        metrics["train_samples"] = min(max_train_samples , len(train_dataset ) )
        trainer.save_model()  # Saves the tokenizer too for easy upload
        trainer.log_metrics("""train""" , metrics )
        trainer.save_metrics("""train""" , metrics )
        trainer.save_state()
    # Evaluation
    if training_args.do_eval:
        logger.info("""*** Evaluate ***""" )
        metrics = trainer.evaluate(eval_dataset=eval_dataset )
        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset )
        metrics["eval_samples"] = min(max_eval_samples , len(eval_dataset ) )
        trainer.log_metrics("""eval""" , metrics )
        trainer.save_metrics("""eval""" , metrics )
    # Prediction
    if training_args.do_predict:
        logger.info("""*** Predict ***""" )
        predictions, labels, metrics = trainer.predict(predict_dataset , metric_key_prefix="""predict""" )
        max_predict_samples = (
            data_args.max_predict_samples if data_args.max_predict_samples is not None else len(predict_dataset )
        )
        metrics["predict_samples"] = min(max_predict_samples , len(predict_dataset ) )
        trainer.log_metrics("""predict""" , metrics )
        trainer.save_metrics("""predict""" , metrics )
        predictions = np.argmax(predictions , axis=1 )
        output_predict_file = os.path.join(training_args.output_dir , """predictions.txt""" )
        if trainer.is_world_process_zero():
            with open(output_predict_file , """w""" ) as writer:
                writer.write("""index\tprediction\n""" )
                for index, item in enumerate(predictions ):
                    item = label_list[item]
                    writer.write(f"{index}\t{item}\n" )
if __name__ == "__main__":
main()
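# Example invocation (hypothetical output dir; any XNLI language code works for --language):
#   python run_xnli.py \
#       --model_name_or_path bert-base-multilingual-cased \
#       --language de --train_language en \
#       --do_train --do_eval \
#       --per_device_train_batch_size 32 \
#       --output_dir /tmp/debug_xnli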
| 44 |
'''simple docstring'''
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
model_classes = {
    '''b0''': efficientnet.EfficientNetB0,
    '''b1''': efficientnet.EfficientNetB1,
    '''b2''': efficientnet.EfficientNetB2,
    '''b3''': efficientnet.EfficientNetB3,
    '''b4''': efficientnet.EfficientNetB4,
    '''b5''': efficientnet.EfficientNetB5,
    '''b6''': efficientnet.EfficientNetB6,
    '''b7''': efficientnet.EfficientNetB7,
}
CONFIG_MAP = {
'''b0''': {
'''hidden_dim''': 1280,
'''width_coef''': 1.0,
'''depth_coef''': 1.0,
'''image_size''': 224,
'''dropout_rate''': 0.2,
'''dw_padding''': [],
},
'''b1''': {
'''hidden_dim''': 1280,
'''width_coef''': 1.0,
'''depth_coef''': 1.1,
'''image_size''': 240,
'''dropout_rate''': 0.2,
'''dw_padding''': [16],
},
'''b2''': {
'''hidden_dim''': 1408,
'''width_coef''': 1.1,
'''depth_coef''': 1.2,
'''image_size''': 260,
'''dropout_rate''': 0.3,
'''dw_padding''': [5, 8, 16],
},
'''b3''': {
'''hidden_dim''': 1536,
'''width_coef''': 1.2,
'''depth_coef''': 1.4,
'''image_size''': 300,
'''dropout_rate''': 0.3,
'''dw_padding''': [5, 18],
},
'''b4''': {
'''hidden_dim''': 1792,
'''width_coef''': 1.4,
'''depth_coef''': 1.8,
'''image_size''': 380,
'''dropout_rate''': 0.4,
'''dw_padding''': [6],
},
'''b5''': {
'''hidden_dim''': 2048,
'''width_coef''': 1.6,
'''depth_coef''': 2.2,
'''image_size''': 456,
'''dropout_rate''': 0.4,
'''dw_padding''': [13, 27],
},
'''b6''': {
'''hidden_dim''': 2304,
'''width_coef''': 1.8,
'''depth_coef''': 2.6,
'''image_size''': 528,
'''dropout_rate''': 0.5,
'''dw_padding''': [31],
},
'''b7''': {
'''hidden_dim''': 2560,
'''width_coef''': 2.0,
'''depth_coef''': 3.1,
'''image_size''': 600,
'''dropout_rate''': 0.5,
'''dw_padding''': [18],
},
}
def get_efficientnet_config( model_name ):
    config = EfficientNetConfig()
    config.hidden_dim = CONFIG_MAP[model_name]['''hidden_dim''']
    config.width_coefficient = CONFIG_MAP[model_name]['''width_coef''']
    config.depth_coefficient = CONFIG_MAP[model_name]['''depth_coef''']
    config.image_size = CONFIG_MAP[model_name]['''image_size''']
    config.dropout_rate = CONFIG_MAP[model_name]['''dropout_rate''']
    config.depthwise_padding = CONFIG_MAP[model_name]['''dw_padding''']
    repo_id = '''huggingface/label-files'''
    filename = '''imagenet-1k-id2label.json'''
    config.num_labels = 1_0_0_0
    idalabel = json.load(open(hf_hub_download(repo_id , filename , repo_type='''dataset''' ) , '''r''' ) )
    idalabel = {int(k): v for k, v in idalabel.items()}
    config.idalabel = idalabel
    config.labelaid = {v: k for k, v in idalabel.items()}
    return config
def prepare_img( ):
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
def convert_image_processor( model_name ):
    size = CONFIG_MAP[model_name]['''image_size''']
    preprocessor = EfficientNetImageProcessor(
        size={'''height''': size, '''width''': size} , image_mean=[0.4_8_5, 0.4_5_6, 0.4_0_6] , image_std=[0.4_7_8_5_3_9_4_4, 0.4_7_3_2_8_6_4, 0.4_7_4_3_4_1_6_3] , do_center_crop=False , )
    return preprocessor
def rename_keys( original_param_names ):
    block_names = [v.split('''_''' )[0].split('''block''' )[1] for v in original_param_names if v.startswith('''block''' )]
    block_names = sorted(set(block_names ) )
    num_blocks = len(block_names )
    block_name_mapping = {b: str(i ) for b, i in zip(block_names , range(num_blocks ) )}
    rename_keys = []
rename_keys.append(('''stem_conv/kernel:0''', '''embeddings.convolution.weight''') )
rename_keys.append(('''stem_bn/gamma:0''', '''embeddings.batchnorm.weight''') )
rename_keys.append(('''stem_bn/beta:0''', '''embeddings.batchnorm.bias''') )
rename_keys.append(('''stem_bn/moving_mean:0''', '''embeddings.batchnorm.running_mean''') )
rename_keys.append(('''stem_bn/moving_variance:0''', '''embeddings.batchnorm.running_var''') )
for b in block_names:
        hf_b = block_name_mapping[b]
rename_keys.append((F"""block{b}_expand_conv/kernel:0""", F"""encoder.blocks.{hf_b}.expansion.expand_conv.weight""") )
rename_keys.append((F"""block{b}_expand_bn/gamma:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.weight""") )
rename_keys.append((F"""block{b}_expand_bn/beta:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.bias""") )
rename_keys.append(
(F"""block{b}_expand_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.running_mean""") )
rename_keys.append(
(F"""block{b}_expand_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.running_var""") )
rename_keys.append(
(F"""block{b}_dwconv/depthwise_kernel:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight""") )
rename_keys.append((F"""block{b}_bn/gamma:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight""") )
rename_keys.append((F"""block{b}_bn/beta:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias""") )
rename_keys.append(
(F"""block{b}_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean""") )
rename_keys.append(
(F"""block{b}_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var""") )
rename_keys.append((F"""block{b}_se_reduce/kernel:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.reduce.weight""") )
rename_keys.append((F"""block{b}_se_reduce/bias:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.reduce.bias""") )
rename_keys.append((F"""block{b}_se_expand/kernel:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.expand.weight""") )
rename_keys.append((F"""block{b}_se_expand/bias:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.expand.bias""") )
rename_keys.append(
(F"""block{b}_project_conv/kernel:0""", F"""encoder.blocks.{hf_b}.projection.project_conv.weight""") )
rename_keys.append((F"""block{b}_project_bn/gamma:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.weight""") )
rename_keys.append((F"""block{b}_project_bn/beta:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.bias""") )
rename_keys.append(
(F"""block{b}_project_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.running_mean""") )
rename_keys.append(
(F"""block{b}_project_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.running_var""") )
rename_keys.append(('''top_conv/kernel:0''', '''encoder.top_conv.weight''') )
rename_keys.append(('''top_bn/gamma:0''', '''encoder.top_bn.weight''') )
rename_keys.append(('''top_bn/beta:0''', '''encoder.top_bn.bias''') )
rename_keys.append(('''top_bn/moving_mean:0''', '''encoder.top_bn.running_mean''') )
rename_keys.append(('''top_bn/moving_variance:0''', '''encoder.top_bn.running_var''') )
    key_mapping = {}
    for item in rename_keys:
        if item[0] in original_param_names:
            key_mapping[item[0]] = '''efficientnet.''' + item[1]

    key_mapping['''predictions/kernel:0'''] = '''classifier.weight'''
    key_mapping['''predictions/bias:0'''] = '''classifier.bias'''
    return key_mapping
def replace_params(hf_params , tf_params , key_mapping ):
    for key, value in tf_params.items():
        if "normalization" in key:
            continue

        hf_key = key_mapping[key]
        if "_conv" in key and "kernel" in key:
            new_hf_value = torch.from_numpy(value ).permute(3 , 2 , 0 , 1 )
        elif "depthwise_kernel" in key:
            new_hf_value = torch.from_numpy(value ).permute(2 , 3 , 0 , 1 )
        elif "kernel" in key:
            new_hf_value = torch.from_numpy(np.transpose(value ) )
        else:
            new_hf_value = torch.from_numpy(value )
        # Replace HF parameters with original TF model parameters
        assert hf_params[hf_key].shape == new_hf_value.shape
        hf_params[hf_key].copy_(new_hf_value )
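# Why the permutes in replace_params (a sketch; shapes are illustrative):
# TF stores conv kernels as (H, W, C_in, C_out) while PyTorch expects
# (C_out, C_in, H, W), hence permute(3, 2, 0, 1). TF depthwise kernels are
# (H, W, C_in, depth_multiplier), mapped with permute(2, 3, 0, 1), and dense
# kernels are (in, out), mapped with a plain transpose.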
@torch.no_grad()
def convert_efficientnet_checkpoint(model_name , pytorch_dump_folder_path , save_model , push_to_hub ):
    original_model = model_classes[model_name](
        include_top=True , weights='''imagenet''' , input_tensor=None , input_shape=None , pooling=None , classes=1_0_0_0 , classifier_activation='''softmax''' , )

    tf_params = original_model.trainable_variables
    tf_non_train_params = original_model.non_trainable_variables
    tf_params = {param.name: param.numpy() for param in tf_params}
    for param in tf_non_train_params:
        tf_params[param.name] = param.numpy()
    tf_param_names = list(tf_params.keys() )

    # Load HuggingFace model
    config = get_efficientnet_config(model_name )
    hf_model = EfficientNetForImageClassification(config ).eval()
    hf_params = hf_model.state_dict()

    # Create src-to-dst parameter name mapping dictionary
    print('''Converting parameters...''' )
    key_mapping = rename_keys(tf_param_names )
    replace_params(hf_params , tf_params , key_mapping )

    # Initialize preprocessor and preprocess input image
    preprocessor = convert_image_processor(model_name )
    inputs = preprocessor(images=prepare_img() , return_tensors='''pt''' )

    # HF model inference
    hf_model.eval()
    with torch.no_grad():
        outputs = hf_model(**inputs )
    hf_logits = outputs.logits.detach().numpy()

    # Original model inference
    original_model.trainable = False
    image_size = CONFIG_MAP[model_name]['''image_size''']
    img = prepare_img().resize((image_size, image_size) , resample=PIL.Image.NEAREST )
    x = image.img_to_array(img )
    x = np.expand_dims(x , axis=0 )
    original_logits = original_model.predict(x )

    # Check whether original and HF model outputs match -> np.allclose
    assert np.allclose(original_logits , hf_logits , atol=1E-3 ), "The predicted logits are not the same."
    print('''Model outputs match!''' )

    if save_model:
        # Create folder to save model
        if not os.path.isdir(pytorch_dump_folder_path ):
            os.mkdir(pytorch_dump_folder_path )
        # Save converted model and image processor
        hf_model.save_pretrained(pytorch_dump_folder_path )
        preprocessor.save_pretrained(pytorch_dump_folder_path )

    if push_to_hub:
        # Push model and image processor to hub
        print(f"""Pushing converted {model_name} to the hub...""" )
        model_name = f"""efficientnet-{model_name}"""
        preprocessor.push_to_hub(model_name )
        hf_model.push_to_hub(model_name )
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''b0''',
type=str,
help='''Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''hf_model''',
type=str,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument('''--save_model''', action='''store_true''', help='''Save model to local''')
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Push model and image processor to the hub''')
UpperCamelCase = parser.parse_args()
convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
| 319 | 0 |
"""simple docstring"""
from bisect import bisect
from itertools import accumulate
def frac_knapsack(vl , wt , w , n ):
    # Sort items by value/weight ratio, best first.
    r = sorted(zip(vl , wt ) , key=lambda x: x[0] / x[1] , reverse=True )
    vl , wt = [i[0] for i in r], [i[1] for i in r]
    acc = list(accumulate(wt ) )
    k = bisect(acc , w )
    return (
        0
        if k == 0
        else sum(vl[:k] ) + (w - acc[k - 1]) * (vl[k]) / (wt[k])
        if k != n
        else sum(vl[:k] )
    )
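# Worked example (classic instance; the numbers are illustrative):
#   frac_knapsack([60, 100, 120], [10, 20, 30], 50, 3) -> 240.0
# sorted value/weight ratios are 6, 5, 4: take the first two items whole
# (value 160) plus 20/30 of the last item (value 80).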
if __name__ == "__main__":
import doctest
doctest.testmod()
| 45 |
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {'''configuration_focalnet''': ['''FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''FocalNetConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_focalnet'''] = [
'''FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FocalNetForImageClassification''',
'''FocalNetForMaskedImageModeling''',
'''FocalNetBackbone''',
'''FocalNetModel''',
'''FocalNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 319 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
"configuration_mega": ["MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MegaConfig", "MegaOnnxConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mega"] = [
"MEGA_PRETRAINED_MODEL_ARCHIVE_LIST",
"MegaForCausalLM",
"MegaForMaskedLM",
"MegaForMultipleChoice",
"MegaForQuestionAnswering",
"MegaForSequenceClassification",
"MegaForTokenClassification",
"MegaModel",
"MegaPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mega import (
MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
MegaForCausalLM,
MegaForMaskedLM,
MegaForMultipleChoice,
MegaForQuestionAnswering,
MegaForSequenceClassification,
MegaForTokenClassification,
MegaModel,
MegaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 46 |
'''simple docstring'''
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
logger = logging.get_logger(__name__)

Prediction = Dict[str, Any]
Predictions = List[Prediction]
@add_end_docstrings(PIPELINE_INIT_ARGS )
class ObjectDetectionPipeline(Pipeline ):
    '''simple docstring'''
    def __init__(self , *args , **kwargs ):
        '''simple docstring'''
        super().__init__(*args , **kwargs )
if self.framework == "tf":
raise ValueError(f"""The {self.__class__} is only available in PyTorch.""" )
requires_backends(self , '''vision''' )
self.check_model_type(
dict(MODEL_FOR_OBJECT_DETECTION_MAPPING.items() + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items() ) )
    def _sanitize_parameters(self , **kwargs ):
        '''simple docstring'''
        postprocess_kwargs = {}
        if "threshold" in kwargs:
            postprocess_kwargs['''threshold'''] = kwargs['''threshold''']
        return {}, {}, postprocess_kwargs
    def __call__(self , *args , **kwargs ) -> Union[Predictions, List[Prediction]]:
        '''simple docstring'''
        return super().__call__(*args , **kwargs )
    def preprocess(self , image ):
        '''simple docstring'''
        image = load_image(image )
        target_size = torch.IntTensor([[image.height, image.width]] )
        inputs = self.image_processor(images=[image] , return_tensors='''pt''' )
        if self.tokenizer is not None:
            inputs = self.tokenizer(text=inputs['''words'''] , boxes=inputs['''boxes'''] , return_tensors='''pt''' )
        inputs['''target_size'''] = target_size
        return inputs
    def _forward(self , model_inputs ):
        '''simple docstring'''
        target_size = model_inputs.pop('''target_size''' )
        outputs = self.model(**model_inputs )
        model_outputs = outputs.__class__({'''target_size''': target_size, **outputs} )
        if self.tokenizer is not None:
            model_outputs['''bbox'''] = model_inputs['''bbox''']
        return model_outputs
    def postprocess(self , model_outputs , threshold=0.9 ) -> Union[Predictions, List[Prediction]]:
        '''simple docstring'''
        target_size = model_outputs['''target_size''']
        if self.tokenizer is not None:
            # This is a LayoutLMForTokenClassification variant.
            # The OCR got the boxes and the model classified the words.
            height, width = target_size[0].tolist()

            def unnormalize(bbox ):
                return self._get_bounding_box(
                    torch.Tensor(
                        [
                            (width * bbox[0] / 10_00),
                            (height * bbox[1] / 10_00),
                            (width * bbox[2] / 10_00),
                            (height * bbox[3] / 10_00),
                        ] ) )

            scores, classes = model_outputs['''logits'''].squeeze(0 ).softmax(dim=-1 ).max(dim=-1 )
            labels = [self.model.config.id2label[prediction] for prediction in classes.tolist()]
            boxes = [unnormalize(bbox ) for bbox in model_outputs['''bbox'''].squeeze(0 )]
            keys = ['''score''', '''label''', '''box''']
            annotation = [dict(zip(keys , vals ) ) for vals in zip(scores.tolist() , labels , boxes ) if vals[0] > threshold]
        else:
            # This is a regular ForObjectDetectionModel
            raw_annotations = self.image_processor.post_process_object_detection(model_outputs , threshold , target_size )
            raw_annotation = raw_annotations[0]
            scores = raw_annotation['''scores''']
            labels = raw_annotation['''labels''']
            boxes = raw_annotation['''boxes''']
            raw_annotation['''scores'''] = scores.tolist()
            raw_annotation['''labels'''] = [self.model.config.id2label[label.item()] for label in labels]
            raw_annotation['''boxes'''] = [self._get_bounding_box(box ) for box in boxes]

            # {"scores": [...], ...} --> [{"score":x, ...}, ...]
            keys = ['''score''', '''label''', '''box''']
            annotation = [
                dict(zip(keys , vals ) )
                for vals in zip(raw_annotation['''scores'''] , raw_annotation['''labels'''] , raw_annotation['''boxes'''] )
            ]
return annotation
def _snake_case ( self : Tuple , SCREAMING_SNAKE_CASE_ : "torch.Tensor" ) -> Dict[str, int]:
'''simple docstring'''
if self.framework != "pt":
raise ValueError('''The ObjectDetectionPipeline is only available in PyTorch.''' )
A , A , A , A: str = box.int().tolist()
A: str = {
'''xmin''': xmin,
'''ymin''': ymin,
'''xmax''': xmax,
'''ymax''': ymax,
}
return bbox
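# Hedged usage sketch (the checkpoint mirrors the tests that accompany this
# pipeline; "threshold" is the postprocess kwarg handled above):
#   from transformers import pipeline
#   detector = pipeline("object-detection", model="facebook/detr-resnet-50")
#   detector("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=0.9)
#   # -> [{"score": ..., "label": "cat", "box": {"xmin": ..., ...}}, ...]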
| 319 | 0 |
'''simple docstring'''
import unittest
from transformers import (
MODEL_FOR_OBJECT_DETECTION_MAPPING,
AutoFeatureExtractor,
AutoModelForObjectDetection,
ObjectDetectionPipeline,
is_vision_available,
pipeline,
)
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_pytesseract,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args , **kwargs ):
            '''simple docstring'''
            pass
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class ObjectDetectionPipelineTests(unittest.TestCase ):
    model_mapping = MODEL_FOR_OBJECT_DETECTION_MAPPING
    def get_test_pipeline(self , model , tokenizer , processor ):
        '''simple docstring'''
        object_detector = ObjectDetectionPipeline(model=model , image_processor=processor )
        return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"]

    def run_pipeline_test(self , object_detector , examples ):
        '''simple docstring'''
        outputs = object_detector('./tests/fixtures/tests_samples/COCO/000000039769.png' , threshold=0.0 )
self.assertGreater(len(_a ) , 0 )
for detected_object in outputs:
            self.assertEqual(
                detected_object , {
                    'score': ANY(float ),
                    'label': ANY(str ),
                    'box': {'xmin': ANY(int ), 'ymin': ANY(int ), 'xmax': ANY(int ), 'ymax': ANY(int )},
                } , )
import datasets
        dataset = datasets.load_dataset('hf-internal-testing/fixtures_image_utils' , 'image' , split='test' )
        batch = [
Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ),
'http://images.cocodataset.org/val2017/000000039769.jpg',
# RGBA
dataset[0]['file'],
# LA
dataset[1]['file'],
# L
dataset[2]['file'],
]
        batch_outputs = object_detector(batch , threshold=0.0 )
        self.assertEqual(len(batch_outputs ) , len(batch ) )
for outputs in batch_outputs:
self.assertGreater(len(_a ) , 0 )
for detected_object in outputs:
                self.assertEqual(
                    detected_object , {
                        'score': ANY(float ),
                        'label': ANY(str ),
                        'box': {'xmin': ANY(int ), 'ymin': ANY(int ), 'xmax': ANY(int ), 'ymax': ANY(int )},
                    } , )
@require_tf
@unittest.skip('Object detection not implemented in TF' )
    def test_small_model_tf(self ):
        '''simple docstring'''
        pass
@require_torch
    def test_small_model_pt(self ):
        '''simple docstring'''
        model_id = 'hf-internal-testing/tiny-detr-mobilenetsv3'

        model = AutoModelForObjectDetection.from_pretrained(model_id )
        feature_extractor = AutoFeatureExtractor.from_pretrained(model_id )
        object_detector = ObjectDetectionPipeline(model=model , feature_extractor=feature_extractor )

        outputs = object_detector('http://images.cocodataset.org/val2017/000000039769.jpg' , threshold=0.0 )

        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
{'score': 0.33_76, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}},
{'score': 0.33_76, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}},
] , )
        outputs = object_detector(
[
'http://images.cocodataset.org/val2017/000000039769.jpg',
'http://images.cocodataset.org/val2017/000000039769.jpg',
] , threshold=0.0 , )
self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
[
{'score': 0.33_76, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}},
{'score': 0.33_76, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}},
],
[
{'score': 0.33_76, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}},
{'score': 0.33_76, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}},
],
] , )
@require_torch
@slow
    def test_large_model_pt(self ):
        '''simple docstring'''
        model_id = 'facebook/detr-resnet-50'

        model = AutoModelForObjectDetection.from_pretrained(model_id )
        feature_extractor = AutoFeatureExtractor.from_pretrained(model_id )
        object_detector = ObjectDetectionPipeline(model=model , feature_extractor=feature_extractor )

        outputs = object_detector('http://images.cocodataset.org/val2017/000000039769.jpg' )
        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
{'score': 0.99_82, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}},
{'score': 0.99_60, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}},
{'score': 0.99_55, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}},
{'score': 0.99_88, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
{'score': 0.99_87, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
] , )
        outputs = object_detector(
[
'http://images.cocodataset.org/val2017/000000039769.jpg',
'http://images.cocodataset.org/val2017/000000039769.jpg',
] )
self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
[
{'score': 0.99_82, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}},
{'score': 0.99_60, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}},
{'score': 0.99_55, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}},
{'score': 0.99_88, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
{'score': 0.99_87, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
],
[
{'score': 0.99_82, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}},
{'score': 0.99_60, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}},
{'score': 0.99_55, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}},
{'score': 0.99_88, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
{'score': 0.99_87, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
],
] , )
@require_torch
@slow
    def test_integration_torch_object_detection(self ):
        '''simple docstring'''
        model_id = 'facebook/detr-resnet-50'

        object_detector = pipeline('object-detection' , model=model_id )

        outputs = object_detector('http://images.cocodataset.org/val2017/000000039769.jpg' )
        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
{'score': 0.99_82, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}},
{'score': 0.99_60, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}},
{'score': 0.99_55, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}},
{'score': 0.99_88, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
{'score': 0.99_87, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
] , )
        outputs = object_detector(
[
'http://images.cocodataset.org/val2017/000000039769.jpg',
'http://images.cocodataset.org/val2017/000000039769.jpg',
] )
self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
[
{'score': 0.99_82, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}},
{'score': 0.99_60, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}},
{'score': 0.99_55, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}},
{'score': 0.99_88, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
{'score': 0.99_87, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
],
[
{'score': 0.99_82, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}},
{'score': 0.99_60, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}},
{'score': 0.99_55, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}},
{'score': 0.99_88, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
{'score': 0.99_87, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
],
] , )
@require_torch
@slow
    def test_threshold(self ):
        '''simple docstring'''
        threshold = 0.9985
        model_id = 'facebook/detr-resnet-50'

        object_detector = pipeline('object-detection' , model=model_id )

        outputs = object_detector('http://images.cocodataset.org/val2017/000000039769.jpg' , threshold=threshold )
        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
{'score': 0.99_88, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
{'score': 0.99_87, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
] , )
@require_torch
@require_pytesseract
@slow
    def test_layoutlm(self ):
        '''simple docstring'''
        model_id = 'Narsil/layoutlmv3-finetuned-funsd'
        threshold = 0.9993

        object_detector = pipeline('object-detection' , model=model_id , threshold=threshold )

        outputs = object_detector(
            'https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png' )
        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
{'score': 0.99_93, 'label': 'I-ANSWER', 'box': {'xmin': 294, 'ymin': 254, 'xmax': 343, 'ymax': 264}},
{'score': 0.99_93, 'label': 'I-ANSWER', 'box': {'xmin': 294, 'ymin': 254, 'xmax': 343, 'ymax': 264}},
] , )
| 47 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {
'''YituTech/conv-bert-base''': '''https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json''',
'''YituTech/conv-bert-medium-small''': (
'''https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json'''
),
'''YituTech/conv-bert-small''': '''https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json''',
# See all ConvBERT models at https://huggingface.co/models?filter=convbert
}
class ConvBertConfig(PretrainedConfig ):
    '''simple docstring'''

    model_type = """convbert"""
    def __init__(self , vocab_size=3_05_22 , hidden_size=7_68 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=30_72 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_12 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1E-12 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , embedding_size=7_68 , head_ratio=2 , conv_kernel_size=9 , num_groups=1 , classifier_dropout=None , **kwargs , ):
        '''simple docstring'''
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs , )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.embedding_size = embedding_size
        self.head_ratio = head_ratio
        self.conv_kernel_size = conv_kernel_size
        self.num_groups = num_groups
        self.classifier_dropout = classifier_dropout
class ConvBertOnnxConfig(OnnxConfig ):
    '''simple docstring'''

    @property
    def inputs(self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task == "multiple-choice":
A: Tuple = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
A: List[str] = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
('''token_type_ids''', dynamic_axis),
] )
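# Minimal usage sketch (defaults above mirror YituTech/conv-bert-base;
# ConvBertModel is the matching transformers model class):
#   from transformers import ConvBertConfig, ConvBertModel
#   config = ConvBertConfig(head_ratio=2, conv_kernel_size=9)
#   model = ConvBertModel(config)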
| 319 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'configuration_efficientformer': [
'EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'EfficientFormerConfig',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['image_processing_efficientformer'] = ['EfficientFormerImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_efficientformer'] = [
'EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'EfficientFormerForImageClassification',
'EfficientFormerForImageClassificationWithTeacher',
'EfficientFormerModel',
'EfficientFormerPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_efficientformer'] = [
'TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFEfficientFormerForImageClassification',
'TFEfficientFormerForImageClassificationWithTeacher',
'TFEfficientFormerModel',
'TFEfficientFormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientformer import EfficientFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientformer import (
EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientFormerForImageClassification,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerModel,
EfficientFormerPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
TFEfficientFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 48 |
'''simple docstring'''
from __future__ import annotations
def check_polygon(nums ) -> bool:
    if len(nums ) < 2:
        raise ValueError('''Monogons and Digons are not polygons in the Euclidean space''' )
    if any(i <= 0 for i in nums ):
        raise ValueError('''All values must be greater than 0''' )
    copy_nums = nums.copy()
    copy_nums.sort()
    return copy_nums[-1] < sum(copy_nums[:-1] )
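# Examples: check_polygon([3, 4, 5]) -> True (5 < 3 + 4), while the
# degenerate check_polygon([1, 1, 2]) -> False (2 is not less than 1 + 1).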
if __name__ == "__main__":
import doctest
doctest.testmod()
| 319 | 0 |
from __future__ import annotations
import numpy as np
def relu(vector ):
    return np.maximum(0 , vector )
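# np.maximum broadcasts elementwise, so the same function handles nested
# inputs, e.g. relu([[-3.0, 2.0], [0.5, -0.1]]) -> array([[0., 2.], [0.5, 0.]])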
if __name__ == "__main__":
print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
| 49 |
'''simple docstring'''
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(__file__), '''src'''))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='''ignore''', category=FutureWarning)
def pytest_configure(config ):
config.addinivalue_line(
'''markers''' , '''is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested''' )
config.addinivalue_line(
'''markers''' , '''is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested''' )
config.addinivalue_line('''markers''' , '''is_pipeline_test: mark test to run only when pipelines are tested''' )
config.addinivalue_line('''markers''' , '''is_staging_test: mark test to run only in the staging environment''' )
config.addinivalue_line('''markers''' , '''accelerate_tests: mark test that require accelerate''' )
config.addinivalue_line('''markers''' , '''tool_tests: mark the tool tests that are run on their specific schedule''' )
def pytest_addoption(parser ):
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser )
def pytest_terminal_summary(terminalreporter ):
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption('''--make-reports''' )
    if make_reports:
        pytest_terminal_summary_main(terminalreporter , id=make_reports )
def pytest_sessionfinish(session , exitstatus ):
    # If no tests are collected, pytest exists with code 5, which makes the CI fail.
    if exitstatus == 5:
        session.exitstatus = 0
# Doctest custom flag to ignore output.
IGNORE_RESULT = doctest.register_optionflag('''IGNORE_RESULT''')

OutputChecker = doctest.OutputChecker


class CustomOutputChecker(OutputChecker ):
    '''simple docstring'''

    def check_output(self , want , got , optionflags ):
        '''simple docstring'''
        if IGNORE_RESULT & optionflags:
            return True
        return OutputChecker.check_output(self , want , got , optionflags )


doctest.OutputChecker = CustomOutputChecker
_pytest.doctest.DoctestModule = HfDoctestModule
doctest.DocTestParser = HfDocTestParser
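# With the custom checker installed, a doctest can opt out of output
# comparison via the registered flag, e.g.:
#   >>> some_nondeterministic_call()  # doctest: +IGNORE_RESULT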
| 319 | 0 |
from timeit import timeit
test_data = {
"""MALAYALAM""": True,
"""String""": False,
"""rotor""": True,
"""level""": True,
"""A""": True,
"""BB""": True,
"""ABC""": False,
"""amanaplanacanalpanama""": True, # "a man a plan a canal panama"
}
# Ensure our test data is valid
assert all((key == key[::-1]) is value for key, value in test_data.items())
def is_palindrome(s: str ) -> bool:
    start_i = 0
    end_i = len(s ) - 1
    while start_i < end_i:
        if s[start_i] == s[end_i]:
            start_i += 1
            end_i -= 1
        else:
            return False
    return True


def is_palindrome_traversal(s: str ) -> bool:
    end = len(s ) // 2
    n = len(s )
    # We need to traverse till half of the length of string
    # as we can get access of the i'th last element from
    # i'th index.
    # eg: [0,1,2,3,4,5] => 4th index can be accessed
    # with the help of 1st index (i==n-i-1)
    # where n is length of string
    return all(s[i] == s[n - i - 1] for i in range(end ) )


def is_palindrome_recursive(s: str ) -> bool:
    if len(s ) <= 2:
        return True
    if s[0] == s[len(s ) - 1]:
        return is_palindrome_recursive(s[1:-1] )
    else:
        return False


def is_palindrome_slice(s: str ) -> bool:
    return s == s[::-1]


def benchmark_function(name: str ) -> None:
    stmt = f"""all({name}(key) is value for key, value in test_data.items())"""
    setup = f"""from __main__ import test_data, {name}"""
    number = 50_0000
    result = timeit(stmt=stmt , setup=setup , number=number )
    print(f"""{name:<35} finished {number:,} runs in {result:.5f} seconds""" )
if __name__ == "__main__":
for key, value in test_data.items():
assert is_palindrome(key) is is_palindrome_recursive(key)
assert is_palindrome(key) is is_palindrome_slice(key)
print(F"""{key:21} {value}""")
print("""a man a plan a canal panama""")
# finished 500,000 runs in 0.46793 seconds
benchmark_function("""is_palindrome_slice""")
# finished 500,000 runs in 0.85234 seconds
benchmark_function("""is_palindrome""")
# finished 500,000 runs in 1.32028 seconds
benchmark_function("""is_palindrome_recursive""")
# finished 500,000 runs in 2.08679 seconds
benchmark_function("""is_palindrome_traversal""")
| 50 |
'''simple docstring'''
import heapq
import sys
import numpy as np
TPos = tuple[int, int]
class PriorityQueue:
    '''simple docstring'''
    def __init__(self ) -> None:
        '''simple docstring'''
        self.elements = []
        self.set = set()
    def minkey(self ) -> float:
        '''simple docstring'''
        if not self.empty():
            return self.elements[0][0]
        else:
            return float('''inf''' )
    def empty(self ) -> bool:
        '''simple docstring'''
        return len(self.elements ) == 0
    def put(self , item , priority ) -> None:
        '''simple docstring'''
        if item not in self.set:
            heapq.heappush(self.elements , (priority, item) )
            self.set.add(item )
        else:
            # update
            # print("update", item)
            temp = []
            (pri, x) = heapq.heappop(self.elements )
            while x != item:
                temp.append((pri, x) )
                (pri, x) = heapq.heappop(self.elements )
            temp.append((priority, item) )
            for pro, xxx in temp:
                heapq.heappush(self.elements , (pro, xxx) )
    def remove_element(self , item ) -> None:
        '''simple docstring'''
        if item in self.set:
            self.set.remove(item )
            temp = []
            (pro, x) = heapq.heappop(self.elements )
            while x != item:
                temp.append((pro, x) )
                (pro, x) = heapq.heappop(self.elements )
            for prito, yyy in temp:
                heapq.heappush(self.elements , (prito, yyy) )
    def top_show(self ) -> Any:
        '''simple docstring'''
        return self.elements[0][1]

    def get(self ) -> Any:
        '''simple docstring'''
        (priority, item) = heapq.heappop(self.elements )
        self.set.remove(item )
        return (priority, item)
def consistent_heuristic(P , goal ):
    # euclidean distance
    a = np.array(P )
    b = np.array(goal )
    return np.linalg.norm(a - b )


def heuristic_2(P , goal ):
    # integer division by time variable
    return consistent_heuristic(P , goal ) // t


def heuristic_1(P , goal ):
    # manhattan distance
    return abs(P[0] - goal[0] ) + abs(P[1] - goal[1] )


def key(start , i , goal , g_function ):
    ans = g_function[start] + Wa * heuristics[i](start , goal )
    return ans
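# Worked numbers: for P=(0, 0) and goal=(3, 4), heuristic_1 gives |3| + |4| = 7
# (manhattan) while consistent_heuristic gives sqrt(9 + 16) = 5.0 (euclidean);
# heuristic_2 shrinks as the global time variable t grows.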
def do_something(back_pointer , goal , start ):
    grid = np.chararray((n, n) )
    for i in range(n ):
        for j in range(n ):
            grid[i][j] = '''*'''

    for i in range(n ):
        for j in range(n ):
            if (j, (n - 1) - i) in blocks:
                grid[i][j] = '''#'''

    grid[0][(n - 1)] = '''-'''
    x = back_pointer[goal]
    while x != start:
        (x_c, y_c) = x
        # print(x)
        grid[(n - 1) - y_c][x_c] = '''-'''
        x = back_pointer[x]
    grid[(n - 1)][0] = '''-'''

    for i in range(n ):
        for j in range(n ):
            if (i, j) == (0, n - 1):
                print(grid[i][j] , end=''' ''' )
                print('''<-- End position''' , end=''' ''' )
            else:
                print(grid[i][j] , end=''' ''' )
        print()
    print('''^''' )
    print('''Start position''' )
    print()
    print('''# is an obstacle''' )
    print('''- is the path taken by algorithm''' )
    print('''PATH TAKEN BY THE ALGORITHM IS:-''' )
    x = back_pointer[goal]
    while x != start:
        print(x , end=''' ''' )
        x = back_pointer[x]
    print(x )
    sys.exit()
def valid(p ):
if p[0] < 0 or p[0] > n - 1:
return False
if p[1] < 0 or p[1] > n - 1:
return False
return True
def expand_state(s , j , visited , g_function , close_list_anchor , close_list_inad , open_list , back_pointer , ):
    for itera in range(n_heuristic ):
        open_list[itera].remove_element(s )
    # print("s", s)
    # print("j", j)
    (x, y) = s
    left = (x - 1, y)
    right = (x + 1, y)
    up = (x, y + 1)
    down = (x, y - 1)

    for neighbours in [left, right, up, down]:
        if neighbours not in blocks:
            if valid(neighbours ) and neighbours not in visited:
                # print("neighbour", neighbours)
                visited.add(neighbours )
                back_pointer[neighbours] = -1
                g_function[neighbours] = float('''inf''' )

            if valid(neighbours ) and g_function[neighbours] > g_function[s] + 1:
                g_function[neighbours] = g_function[s] + 1
                back_pointer[neighbours] = s
                if neighbours not in close_list_anchor:
                    open_list[0].put(neighbours , key(neighbours , 0 , goal , g_function ) )
                    if neighbours not in close_list_inad:
                        for var in range(1 , n_heuristic ):
                            if key(neighbours , var , goal , g_function ) <= Wa * key(
                                neighbours , 0 , goal , g_function ):
                                open_list[j].put(
                                    neighbours , key(neighbours , var , goal , g_function ) )
def SCREAMING_SNAKE_CASE( ) -> Tuple:
    some_list = []
for x in range(1 , 5 ):
for y in range(1 , 6 ):
some_list.append((x, y) )
for x in range(1_5 , 2_0 ):
some_list.append((x, 1_7) )
for x in range(1_0 , 1_9 ):
for y in range(1 , 1_5 ):
some_list.append((x, y) )
# L block
for x in range(1 , 4 ):
for y in range(1_2 , 1_9 ):
some_list.append((x, y) )
for x in range(3 , 1_3 ):
for y in range(1_6 , 1_9 ):
some_list.append((x, y) )
return some_list
heuristics = {0: consistent_heuristic, 1: heuristic_1, 2: heuristic_2}
blocks_blk = [
(0, 1),
(1, 1),
(2, 1),
(3, 1),
(4, 1),
(5, 1),
(6, 1),
(7, 1),
(8, 1),
(9, 1),
(10, 1),
(11, 1),
(12, 1),
(13, 1),
(14, 1),
(15, 1),
(16, 1),
(17, 1),
(18, 1),
(19, 1),
]
blocks_all = make_common_ground()

blocks = blocks_blk
# hyper parameters (the anchor and inadmissible weights are both 1 here,
# collapsed into a single Wa used at every weighted comparison)
Wa = 1
n = 20
n_heuristic = 3  # one consistent and two other inconsistent

# start and end destination
start = (0, 0)
goal = (n - 1, n - 1)

t = 1
def multi_a_star(start , goal , n_heuristic ):
    g_function = {start: 0, goal: float('''inf''' )}
    back_pointer = {start: -1, goal: -1}
    open_list = []
    visited = set()

    for i in range(n_heuristic ):
        open_list.append(PriorityQueue() )
        open_list[i].put(start , key(start , i , goal , g_function ) )

    close_list_anchor: list[int] = []
    close_list_inad: list[int] = []
    while open_list[0].minkey() < float('''inf''' ):
        for i in range(1 , n_heuristic ):
            # print(open_list[0].minkey(), open_list[i].minkey())
            if open_list[i].minkey() <= Wa * open_list[0].minkey():
                global t
                t += 1
                if g_function[goal] <= open_list[i].minkey():
                    if g_function[goal] < float('''inf''' ):
                        do_something(back_pointer , goal , start )
                else:
                    get_s = open_list[i].top_show()
                    visited.add(get_s )
                    expand_state(
                        get_s , i , visited , g_function , close_list_anchor , close_list_inad , open_list , back_pointer , )
                    close_list_inad.append(get_s )
            else:
                if g_function[goal] <= open_list[0].minkey():
                    if g_function[goal] < float('''inf''' ):
                        do_something(back_pointer , goal , start )
                    else:
                        get_s = open_list[0].top_show()
                        visited.add(get_s )
                        expand_state(
                            get_s , 0 , visited , g_function , close_list_anchor , close_list_inad , open_list , back_pointer , )
                        close_list_anchor.append(get_s )
print('''No path found to goal''' )
print()
for i in range(n - 1 , -1 , -1 ):
for j in range(__lowercase ):
if (j, i) in blocks:
print('''#''' , end=''' ''' )
elif (j, i) in back_pointer:
if (j, i) == (n - 1, n - 1):
print('''*''' , end=''' ''' )
else:
print('''-''' , end=''' ''' )
else:
print('''*''' , end=''' ''' )
if (j, i) == (n - 1, n - 1):
print('''<-- End position''' , end=''' ''' )
print()
print('''^''' )
print('''Start position''' )
print()
print('''# is an obstacle''' )
print('''- is the path taken by algorithm''' )
if __name__ == "__main__":
multi_a_star(start, goal, n_heuristic)
| 319 | 0 |
from __future__ import annotations
import time
from math import sqrt
# 1 for manhattan, 0 for euclidean
HEURISTIC = 0

grid = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right

TPosition = tuple[int, int]
class Node:
    def __init__(self , pos_x , pos_y , goal_x , goal_y , g_cost , parent , ):
        """simple docstring"""
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.h_cost = self.calculate_heuristic()
        self.f_cost = self.g_cost + self.h_cost
    def calculate_heuristic(self ) -> float:
        """simple docstring"""
        dx = self.pos_x - self.goal_x
        dy = self.pos_y - self.goal_y
        if HEURISTIC == 1:
            return abs(dx ) + abs(dy )
        else:
            return sqrt(dy**2 + dx**2 )
    def __lt__(self , other ):
        """simple docstring"""
        return self.f_cost < other.f_cost
class AStar:
    def __init__(self , start , goal ):
        """simple docstring"""
        self.start = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , None )
        self.target = Node(goal[1] , goal[0] , goal[1] , goal[0] , 99999 , None )

        self.open_nodes = [self.start]
        self.closed_nodes = []

        self.reached = False
    def search(self ):
        """simple docstring"""
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0 )

            if current_node.pos == self.target.pos:
                return self.retrace_path(current_node )

            self.closed_nodes.append(current_node )
            successors = self.get_successors(current_node )

            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue

                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node )
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node ) )

                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node )
                    else:
                        self.open_nodes.append(better_node )

        return [self.start.pos]
    def get_successors(self , parent ):
        """simple docstring"""
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(grid ) - 1):
                continue

            if grid[pos_y][pos_x] != 0:
                continue

            successors.append(
                Node(
                    pos_x , pos_y , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , parent , ) )
        return successors
    def retrace_path(self , node ):
        """simple docstring"""
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x) )
            current_node = current_node.parent
        path.reverse()
        return path
class BidirectionalAStar:
    def __init__(self , start , goal ):
        """simple docstring"""
        self.fwd_astar = AStar(start , goal )
        self.bwd_astar = AStar(goal , start )
        self.reached = False
    def search(self ):
        """simple docstring"""
        while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
            self.fwd_astar.open_nodes.sort()
            self.bwd_astar.open_nodes.sort()
            current_fwd_node = self.fwd_astar.open_nodes.pop(0 )
            current_bwd_node = self.bwd_astar.open_nodes.pop(0 )

            if current_bwd_node.pos == current_fwd_node.pos:
                return self.retrace_bidirectional_path(
                    current_fwd_node , current_bwd_node )

            self.fwd_astar.closed_nodes.append(current_fwd_node )
            self.bwd_astar.closed_nodes.append(current_bwd_node )

            self.fwd_astar.target = current_bwd_node
            self.bwd_astar.target = current_fwd_node

            successors = {
                self.fwd_astar: self.fwd_astar.get_successors(current_fwd_node ),
                self.bwd_astar: self.bwd_astar.get_successors(current_bwd_node ),
            }

            for astar in [self.fwd_astar, self.bwd_astar]:
                for child_node in successors[astar]:
                    if child_node in astar.closed_nodes:
                        continue

                    if child_node not in astar.open_nodes:
                        astar.open_nodes.append(child_node )
                    else:
                        # retrieve the best current path
                        better_node = astar.open_nodes.pop(
                            astar.open_nodes.index(child_node ) )

                        if child_node.g_cost < better_node.g_cost:
                            astar.open_nodes.append(child_node )
                        else:
                            astar.open_nodes.append(better_node )

        return [self.fwd_astar.start.pos]
    def retrace_bidirectional_path(self , fwd_node , bwd_node ):
        """simple docstring"""
        fwd_path = self.fwd_astar.retrace_path(fwd_node )
        bwd_path = self.bwd_astar.retrace_path(bwd_node )
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    start_time = time.time()
    a_star = AStar(init, goal)
    path = a_star.search()
    end_time = time.time() - start_time
    print(f"AStar execution time = {end_time:f} seconds")

    bd_start_time = time.time()
    bd_astar = BidirectionalAStar(init, goal)
    bd_end_time = time.time() - bd_start_time
print(f"BidirectionalAStar execution time = {bd_end_time:f} seconds")
| 51 |
'''simple docstring'''
def solution(numerator: int = 1 , digit: int = 1_0_0_0 ) -> int:
    the_digit = 1
    longest_list_length = 0

    for divide_by_number in range(numerator , digit + 1 ):
        has_been_divided: list[int] = []
        now_divide = numerator
        for _ in range(1 , digit + 1 ):
            if now_divide in has_been_divided:
                if longest_list_length < len(has_been_divided ):
                    longest_list_length = len(has_been_divided )
                    the_digit = divide_by_number
            else:
                has_been_divided.append(now_divide )
            now_divide = now_divide * 1_0 % divide_by_number

    return the_digit
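# Worked example: among denominators up to 10, 1/7 has the longest recurring
# cycle (0.142857...), so solution(1, 10) -> 7. For the Project Euler 26
# bound, solution(1, 1000) is expected to return 983.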
# Tests
if __name__ == "__main__":
import doctest
doctest.testmod()
| 319 | 0 |
import warnings
from ...utils import logging
from .image_processing_imagegpt import ImageGPTImageProcessor
__lowerCamelCase : List[Any] = logging.get_logger(__name__)
class ImageGPTFeatureExtractor(ImageGPTImageProcessor ):
    def __init__(self , *args , **kwargs ):
        '''simple docstring'''
        warnings.warn(
            "The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use ImageGPTImageProcessor instead." , FutureWarning , )
        super().__init__(*args , **kwargs )
| 52 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
'''configuration_vision_encoder_decoder''': ['''VisionEncoderDecoderConfig''', '''VisionEncoderDecoderOnnxConfig''']
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_vision_encoder_decoder'''] = ['''VisionEncoderDecoderModel''']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_vision_encoder_decoder'''] = ['''TFVisionEncoderDecoderModel''']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_flax_vision_encoder_decoder'''] = ['''FlaxVisionEncoderDecoderModel''']
if TYPE_CHECKING:
from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 319 | 0 |
'''simple docstring'''
from dataclasses import dataclass, field
from typing import Optional
from transformers import AutoConfig, AutoImageProcessor, AutoTokenizer, FlaxVisionEncoderDecoderModel, HfArgumentParser
@dataclass
class ModelArguments:
    """simple docstring"""

    output_dir: str = field(
        metadata={"help": "The output directory where the model will be written."} , )
    encoder_model_name_or_path: str = field(
        metadata={
            "help": (
                "The encoder model checkpoint for weights initialization."
                "Don't set if you want to train an encoder model from scratch."
            )
        } , )
    decoder_model_name_or_path: str = field(
        metadata={
            "help": (
                "The decoder model checkpoint for weights initialization."
                "Don't set if you want to train a decoder model from scratch."
            )
        } , )
    encoder_config_name: Optional[str] = field(
        default=None , metadata={"help": "Pretrained encoder config name or path if not the same as encoder_model_name"} )
    decoder_config_name: Optional[str] = field(
        default=None , metadata={"help": "Pretrained decoder config name or path if not the same as decoder_model_name"} )
def main() -> None:
    """simple docstring"""
    parser = HfArgumentParser((ModelArguments,) )
    (model_args,) = parser.parse_args_into_dataclasses()

    # Load pretrained model and tokenizer

    # Use explicit specified encoder config
    if model_args.encoder_config_name:
        encoder_config = AutoConfig.from_pretrained(model_args.encoder_config_name )
    # Use pretrained encoder model's config
    else:
        encoder_config = AutoConfig.from_pretrained(model_args.encoder_model_name_or_path )

    # Use explicit specified decoder config
    if model_args.decoder_config_name:
        decoder_config = AutoConfig.from_pretrained(model_args.decoder_config_name )
    # Use pretrained decoder model's config
    else:
        decoder_config = AutoConfig.from_pretrained(model_args.decoder_model_name_or_path )

    # necessary for `from_encoder_decoder_pretrained` when `decoder_config` is passed
    decoder_config.is_decoder = True
    decoder_config.add_cross_attention = True

    model = FlaxVisionEncoderDecoderModel.from_encoder_decoder_pretrained(
        encoder_pretrained_model_name_or_path=model_args.encoder_model_name_or_path , decoder_pretrained_model_name_or_path=model_args.decoder_model_name_or_path , encoder_config=encoder_config , decoder_config=decoder_config , )

    # GPT2 only has bos/eos tokens but not decoder_start/pad tokens
    decoder_start_token_id = decoder_config.decoder_start_token_id
    pad_token_id = decoder_config.pad_token_id
    if decoder_start_token_id is None:
        decoder_start_token_id = decoder_config.bos_token_id
    if pad_token_id is None:
        pad_token_id = decoder_config.eos_token_id

    # This is necessary to make Flax's generate() work
    model.config.eos_token_id = decoder_config.eos_token_id
    model.config.decoder_start_token_id = decoder_start_token_id
    model.config.pad_token_id = pad_token_id

    image_processor = AutoImageProcessor.from_pretrained(model_args.encoder_model_name_or_path )

    tokenizer = AutoTokenizer.from_pretrained(model_args.decoder_model_name_or_path )
    tokenizer.pad_token = tokenizer.convert_ids_to_tokens(model.config.pad_token_id )

    model.save_pretrained(model_args.output_dir )
    image_processor.save_pretrained(model_args.output_dir )
    tokenizer.save_pretrained(model_args.output_dir )
if __name__ == "__main__":
main()
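# Hedged CLI sketch (checkpoint names are illustrative; the flags map to the
# ModelArguments fields above):
#   python create_model.py \
#     --output_dir ./vit-gpt2 \
#     --encoder_model_name_or_path google/vit-base-patch16-224-in21k \
#     --decoder_model_name_or_path gpt2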
| 53 |
'''simple docstring'''
import fire
from utils import calculate_rouge, save_json
def calculate_rouge_path(pred_path , tgt_path , save_path=None , **kwargs ):
    """Kwargs will be passed to calculate_rouge"""
    pred_lns = [x.strip() for x in open(pred_path ).readlines()]
    tgt_lns = [x.strip() for x in open(tgt_path ).readlines()][: len(pred_lns )]
    metrics = calculate_rouge(pred_lns , tgt_lns , **kwargs )
    if save_path is not None:
        save_json(metrics , save_path , indent=None )
    return metrics  # these print nicely
if __name__ == "__main__":
fire.Fire(calculate_rouge_path)
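# Hedged CLI sketch (file names are illustrative; extra flags are forwarded
# to calculate_rouge as kwargs by fire):
#   python rouge_cli.py predictions.txt references.txt --save_path metrics.json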
| 319 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ..utils import _LazyModule
_import_structure = {
'''config''': [
'''EXTERNAL_DATA_FORMAT_SIZE_LIMIT''',
'''OnnxConfig''',
'''OnnxConfigWithPast''',
'''OnnxSeq2SeqConfigWithPast''',
'''PatchingSpec''',
],
'''convert''': ['''export''', '''validate_model_outputs'''],
'''features''': ['''FeaturesManager'''],
'''utils''': ['''ParameterFormat''', '''compute_serialized_parameters_size'''],
}
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
OnnxSeqaSeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 54 |
'''simple docstring'''
def bubble_sort(list_data: list , length: int = 0 ) -> list:
    length = length or len(list_data )
    swapped = False
    for i in range(length - 1 ):
        if list_data[i] > list_data[i + 1]:
            list_data[i], list_data[i + 1] = list_data[i + 1], list_data[i]
            swapped = True

    return list_data if not swapped else bubble_sort(list_data , length - 1 )
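# Trace on [3, 1, 2]: the first pass swaps to [1, 2, 3] (swapped=True), the
# recursive pass over the shortened prefix makes no swap, so recursion stops
# and [1, 2, 3] is returned.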
if __name__ == "__main__":
import doctest
doctest.testmod()
| 319 | 0 |
'''simple docstring'''
def equation(x: float ) -> float:
    return 10 - x * x


def bisection(a: float , b: float ) -> float:
    # Bolzano theorem: a sign change guarantees a root between a and b
    if equation(a ) * equation(b ) >= 0:
        raise ValueError("Wrong space!" )

    c = a
    while (b - a) >= 0.01:
        # Find middle point
        c = (a + b) / 2
        # Check if middle point is root
        if equation(c ) == 0.0:
            break
        # Decide the side to repeat the steps
        if equation(c ) * equation(a ) < 0:
            b = c
        else:
            a = c
    return c
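# Worked example: equation(x) = 10 - x*x has roots at +/-sqrt(10) ~= +/-3.162,
# so bisection(-2, 5) and bisection(0, 6) both converge to ~3.16 within the
# 0.01 interval tolerance of the loop above.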
if __name__ == "__main__":
import doctest
doctest.testmod()
print(bisection(-2, 5))
print(bisection(0, 6))
| 55 |
'''simple docstring'''
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
    VisualBertConfig,
    VisualBertForMultipleChoice,
    VisualBertForPreTraining,
    VisualBertForQuestionAnswering,
    VisualBertForVisualReasoning,
)
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

rename_keys_prefix = [
    ("bert.bert", "visual_bert"),
    ("bert.cls", "cls"),
    ("bert.classifier", "cls"),
    ("token_type_embeddings_visual", "visual_token_type_embeddings"),
    ("position_embeddings_visual", "visual_position_embeddings"),
    ("projection", "visual_projection"),
]

ACCEPTABLE_CHECKPOINTS = [
    "nlvr2_coco_pre_trained.th",
    "nlvr2_fine_tuned.th",
    "nlvr2_pre_trained.th",
    "vcr_coco_pre_train.th",
    "vcr_fine_tune.th",
    "vcr_pre_train.th",
    "vqa_coco_pre_trained.th",
    "vqa_fine_tuned.th",
    "vqa_pre_trained.th",
]


def load_state_dict(checkpoint_path):
    sd = torch.load(checkpoint_path, map_location="cpu")
    return sd


def get_new_dict(d, config, rename_keys_prefix=rename_keys_prefix):
    new_d = OrderedDict()
    new_d["visual_bert.embeddings.position_ids"] = torch.arange(config.max_position_embeddings).expand((1, -1))
    # detector_d = OrderedDict()
    for key in d:
        if "detector" in key:
            # detector_d[key.replace('detector.','')] = d[key]
            continue
        new_key = key
        for name_pair in rename_keys_prefix:
            new_key = new_key.replace(name_pair[0], name_pair[1])
        new_d[new_key] = d[key]
        if key == "bert.cls.predictions.decoder.weight":
            # Old bert code didn't have `decoder.bias`, but was added separately
            new_d["cls.predictions.decoder.bias"] = new_d["cls.predictions.bias"]
    return new_d


@torch.no_grad()
def convert_visual_bert_checkpoint(checkpoint_path, pytorch_dump_folder_path):
    assert (
        checkpoint_path.split("/")[-1] in ACCEPTABLE_CHECKPOINTS
    ), f"The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}."

    # Get Config
    if "pre" in checkpoint_path:
        model_type = "pretraining"
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 512}
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
        elif "nlvr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 1024}
        else:
            raise NotImplementedError(f"No implementation found for `{checkpoint_path}`.")
    else:
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 512}
            model_type = "multichoice"
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
            model_type = "vqa_advanced"
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048, "num_labels": 3129}
            model_type = "vqa"
        elif "nlvr" in checkpoint_path:
            config_params = {
                "visual_embedding_dim": 1024,
                "num_labels": 2,
            }
            model_type = "nlvr"

    config = VisualBertConfig(**config_params)

    # Load State Dict
    state_dict = load_state_dict(checkpoint_path)

    new_state_dict = get_new_dict(state_dict, config)

    if model_type == "pretraining":
        model = VisualBertForPreTraining(config)
    elif model_type == "vqa":
        model = VisualBertForQuestionAnswering(config)
    elif model_type == "nlvr":
        model = VisualBertForVisualReasoning(config)
    elif model_type == "multichoice":
        model = VisualBertForMultipleChoice(config)

    model.load_state_dict(new_state_dict)
    # Save Checkpoints
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("orig_checkpoint_path", type=str, help="A path to .th on local filesystem.")
    parser.add_argument("pytorch_dump_folder_path", type=str, help="Path to the output PyTorch model.")

    args = parser.parse_args()
    convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
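
# Example invocation (added; the script file name is assumed — the checkpoint
# name must be one of ACCEPTABLE_CHECKPOINTS):
#   python convert_visual_bert_checkpoint.py vqa_pre_trained.th ./visualbert-vqa-pre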
| 319 | 0 |
'''simple docstring'''
from __future__ import annotations
from math import pi
from typing import Protocol
import matplotlib.pyplot as plt
import numpy as np
class FilterType(Protocol):
    def process(self, sample: float) -> float:
        """Calculate y[n] for an input sample x[n]."""
        return 0.0


def get_bounds(fft_results: np.ndarray, samplerate: int) -> tuple[int | float, int | float]:
    lowest = min([-20, np.min(fft_results[1 : samplerate // 2 - 1])])
    highest = max([20, np.max(fft_results[1 : samplerate // 2 - 1])])
    return lowest, highest


def show_frequency_response(filter_type: FilterType, samplerate: int) -> None:
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]

    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.abs(np.fft.fft(outputs))
    fft_db = 20 * np.log10(fft_out)

    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")

    # Display within reasonable bounds
    bounds = get_bounds(fft_db, samplerate)
    plt.ylim(max([-80, bounds[0]]), min([80, bounds[1]]))
    plt.ylabel("Gain (dB)")

    plt.plot(fft_db)
    plt.show()


def show_phase_response(filter_type: FilterType, samplerate: int) -> None:
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]

    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler

    fft_out = np.angle(np.fft.fft(outputs))

    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")

    plt.ylim(-2 * pi, 2 * pi)
    plt.ylabel("Phase shift (Radians)")
    plt.plot(np.unwrap(fft_out, -2 * pi))
    plt.show()
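

if __name__ == "__main__":
    # Illustrative demo (added; not in the original file): an identity filter
    # passes the impulse through unchanged, so the magnitude response is a
    # flat 0 dB line across the whole band.
    class Passthrough:
        def process(self, sample: float) -> float:
            return sample

    show_frequency_response(Passthrough(), 48000)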
| 56 |
'''simple docstring'''
from itertools import permutations
def is_substring_divisible(num: tuple) -> bool:
    if num[3] % 2 != 0:
        return False

    if (num[2] + num[3] + num[4]) % 3 != 0:
        return False

    if num[5] % 5 != 0:
        return False

    tests = [7, 11, 13, 17]
    for i, test in enumerate(tests):
        if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
            return False
    return True


def solution(n: int = 10) -> int:
    return sum(
        int("".join(map(str, num)))
        for num in permutations(range(n))
        if is_substring_divisible(num)
    )


if __name__ == "__main__":
    print(f"{solution() = }")
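
    # Worked example (added): 1406357289 satisfies every rule above —
    # d4 = 6 is even, 0+6+3 is divisible by 3, d6 = 5, and 357, 572, 728, 289
    # are divisible by 7, 11, 13 and 17 respectively — so:
    #   is_substring_divisible((1, 4, 0, 6, 3, 5, 7, 2, 8, 9))  # True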
| 319 | 0 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, XLMRobertaTokenizer
from diffusers import AltDiffusionPipeline, AutoencoderKL, DDIMScheduler, PNDMScheduler, UNet2DConditionModel
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class AltDiffusionPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = AltDiffusionPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )

        # TODO: address the non-deterministic text encoder (fails for save-load tests)
        # torch.manual_seed(0)
        # text_encoder_config = RobertaSeriesConfig(
        #     hidden_size=32,
        #     project_dim=32,
        #     intermediate_size=37,
        #     layer_norm_eps=1e-05,
        #     num_attention_heads=4,
        #     num_hidden_layers=5,
        #     vocab_size=5002,
        # )
        # text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)

        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            projection_dim=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=5002,
        )
        text_encoder = CLIPTextModel(text_encoder_config)

        tokenizer = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def test_attention_slicing_forward_pass(self):
        super().test_attention_slicing_forward_pass(expected_max_diff=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)

    def test_alt_diffusion_ddim(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator

        components = self.get_dummy_components()
        torch.manual_seed(0)
        text_encoder_config = RobertaSeriesConfig(
            hidden_size=32,
            project_dim=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            vocab_size=5002,
        )
        # TODO: remove after fixing the non-deterministic text encoder
        text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
        components["text_encoder"] = text_encoder

        alt_pipe = AltDiffusionPipeline(**components)
        alt_pipe = alt_pipe.to(device)
        alt_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["prompt"] = "A photo of an astronaut"

        output = alt_pipe(**inputs)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.5748162, 0.60447145, 0.48821217, 0.50100636, 0.5431185, 0.45763683, 0.49657696, 0.48132733, 0.47573093]
        )

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_alt_diffusion_pndm(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator

        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        text_encoder_config = RobertaSeriesConfig(
            hidden_size=32,
            project_dim=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            vocab_size=5002,
        )
        # TODO: remove after fixing the non-deterministic text encoder
        text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
        components["text_encoder"] = text_encoder
        alt_pipe = AltDiffusionPipeline(**components)
        alt_pipe = alt_pipe.to(device)
        alt_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = alt_pipe(**inputs)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.51605093, 0.5707241, 0.47365507, 0.50578886, 0.5633877, 0.4642503, 0.5182081, 0.48763484, 0.49084237]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2


@slow
@require_torch_gpu
class AltDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_alt_diffusion(self):
        # make sure here that pndm scheduler skips prk
        alt_pipe = AltDiffusionPipeline.from_pretrained("BAAI/AltDiffusion", safety_checker=None)
        alt_pipe = alt_pipe.to(torch_device)
        alt_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = alt_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=20, output_type="np")

        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1010, 0.0800, 0.0794, 0.0885, 0.0843, 0.0762, 0.0769, 0.0729, 0.0586])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_alt_diffusion_fast_ddim(self):
        scheduler = DDIMScheduler.from_pretrained("BAAI/AltDiffusion", subfolder="scheduler")

        alt_pipe = AltDiffusionPipeline.from_pretrained("BAAI/AltDiffusion", scheduler=scheduler, safety_checker=None)
        alt_pipe = alt_pipe.to(torch_device)
        alt_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)

        output = alt_pipe([prompt], generator=generator, num_inference_steps=2, output_type="numpy")
        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.4019, 0.4052, 0.3810, 0.4119, 0.3916, 0.3982, 0.4651, 0.4195, 0.5323])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 57 |
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json",
        "allenai/longformer-large-4096": (
            "https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json"
        ),
        "allenai/longformer-large-4096-finetuned-triviaqa": (
            "https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json"
        ),
        "allenai/longformer-base-4096-extra.pos.embd.only": (
            "https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json"
        ),
        "allenai/longformer-large-4096-extra.pos.embd.only": (
            "https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json"
        ),
    },
    "merges_file": {
        "allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt",
        "allenai/longformer-large-4096": (
            "https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt"
        ),
        "allenai/longformer-large-4096-finetuned-triviaqa": (
            "https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt"
        ),
        "allenai/longformer-base-4096-extra.pos.embd.only": (
            "https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt"
        ),
        "allenai/longformer-large-4096-extra.pos.embd.only": (
            "https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "allenai/longformer-base-4096": 4096,
    "allenai/longformer-large-4096": 4096,
    "allenai/longformer-large-4096-finetuned-triviaqa": 4096,
    "allenai/longformer-base-4096-extra.pos.embd.only": 4096,
    "allenai/longformer-large-4096-extra.pos.embd.only": 4096,
}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode():
    """
    Returns a mapping from utf-8 bytes to unicode strings, deliberately avoiding
    the whitespace/control characters the BPE code barfs on.
    """
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))


def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
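
# Worked example (added): adjacent-symbol pairs are what BPE ranks when
# choosing the next merge, e.g.
#   get_pairs(("h", "e", "l", "l", "o")) == {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}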
class LongformerTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+")

    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)
| 319 | 0 |
'''simple docstring'''
from __future__ import annotations
from PIL import Image
# Define glider example
lowercase_ = [
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
]
# Define blinker example
lowercase_ = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]
def lowerCamelCase ( __lowerCamelCase : list[list[int]] ) ->list[list[int]]:
_SCREAMING_SNAKE_CASE = []
for i in range(len(__lowerCamelCase ) ):
_SCREAMING_SNAKE_CASE = []
for j in range(len(cells[i] ) ):
# Get the number of live neighbours
_SCREAMING_SNAKE_CASE = 0
if i > 0 and j > 0:
neighbour_count += cells[i - 1][j - 1]
if i > 0:
neighbour_count += cells[i - 1][j]
if i > 0 and j < len(cells[i] ) - 1:
neighbour_count += cells[i - 1][j + 1]
if j > 0:
neighbour_count += cells[i][j - 1]
if j < len(cells[i] ) - 1:
neighbour_count += cells[i][j + 1]
if i < len(__lowerCamelCase ) - 1 and j > 0:
neighbour_count += cells[i + 1][j - 1]
if i < len(__lowerCamelCase ) - 1:
neighbour_count += cells[i + 1][j]
if i < len(__lowerCamelCase ) - 1 and j < len(cells[i] ) - 1:
neighbour_count += cells[i + 1][j + 1]
# Rules of the game of life (excerpt from Wikipedia):
# 1. Any live cell with two or three live neighbours survives.
# 2. Any dead cell with three live neighbours becomes a live cell.
# 3. All other live cells die in the next generation.
# Similarly, all other dead cells stay dead.
_SCREAMING_SNAKE_CASE = cells[i][j] == 1
if (
(alive and 2 <= neighbour_count <= 3)
or not alive
and neighbour_count == 3
):
next_generation_row.append(1 )
else:
next_generation_row.append(0 )
next_generation.append(__lowerCamelCase )
return next_generation
def lowerCamelCase ( __lowerCamelCase : list[list[int]] , __lowerCamelCase : int ) ->list[Image.Image]:
_SCREAMING_SNAKE_CASE = []
for _ in range(__lowerCamelCase ):
# Create output image
_SCREAMING_SNAKE_CASE = Image.new("""RGB""" , (len(cells[0] ), len(__lowerCamelCase )) )
_SCREAMING_SNAKE_CASE = img.load()
# Save cells to image
for x in range(len(__lowerCamelCase ) ):
for y in range(len(cells[0] ) ):
_SCREAMING_SNAKE_CASE = 255 - cells[y][x] * 255
_SCREAMING_SNAKE_CASE = (colour, colour, colour)
# Save image
images.append(__lowerCamelCase )
_SCREAMING_SNAKE_CASE = new_generation(__lowerCamelCase )
return images
if __name__ == "__main__":
lowercase_ = generate_images(GLIDER, 16)
images[0].save("""out.gif""", save_all=True, append_images=images[1:])
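
    # Quick check (added): one generation of the blinker rotates it 90 degrees:
    #   new_generation(BLINKER) == [[0, 0, 0], [1, 1, 1], [0, 0, 0]]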
| 58 |
'''simple docstring'''
def remove_digit(num: int) -> int:
    """Return the largest number obtainable by removing exactly one digit."""
    if not isinstance(num, int):
        raise TypeError("only integers accepted as input")
    else:
        num_str = str(abs(num))
        num_transpositions = [list(num_str) for char in range(len(num_str))]
        for index in range(len(num_str)):
            num_transpositions[index].pop(index)
        return max(
            int("".join(list(transposition))) for transposition in num_transpositions
        )


if __name__ == "__main__":
    __import__("doctest").testmod()
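
    # Worked example (added): deleting one digit of 152 yields 52, 12 or 15,
    # and the maximum is 52.
    print(remove_digit(152))  # 52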
| 319 | 0 |
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from huggingface_hub import HfFolder, Repository, create_repo, delete_repo
from requests.exceptions import HTTPError
import transformers
from transformers import (
    CONFIG_MAPPING,
    FEATURE_EXTRACTOR_MAPPING,
    PROCESSOR_MAPPING,
    TOKENIZER_MAPPING,
    AutoConfig,
    AutoFeatureExtractor,
    AutoProcessor,
    AutoTokenizer,
    BertTokenizer,
    ProcessorMixin,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
)
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
from transformers.tokenization_utils import TOKENIZER_CONFIG_FILE
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_tokenizers_available


sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))

from test_module.custom_configuration import CustomConfig  # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor  # noqa E402
from test_module.custom_processing import CustomProcessor  # noqa E402
from test_module.custom_tokenization import CustomTokenizer  # noqa E402


SAMPLE_PROCESSOR_CONFIG = get_tests_dir("fixtures/dummy_feature_extractor_config.json")
SAMPLE_VOCAB = get_tests_dir("fixtures/vocab.json")
SAMPLE_PROCESSOR_CONFIG_DIR = get_tests_dir("fixtures")


class AutoProcessorTest(unittest.TestCase):
    vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]

    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    def test_processor_from_model_shortcut(self):
        processor = AutoProcessor.from_pretrained("facebook/wav2vec2-base-960h")
        self.assertIsInstance(processor, Wav2Vec2Processor)

    def test_processor_from_local_directory_from_repo(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = Wav2Vec2Config()
            processor = AutoProcessor.from_pretrained("facebook/wav2vec2-base-960h")

            # save in new folder
            model_config.save_pretrained(tmpdirname)
            processor.save_pretrained(tmpdirname)

            processor = AutoProcessor.from_pretrained(tmpdirname)

        self.assertIsInstance(processor, Wav2Vec2Processor)

    def test_processor_from_local_directory_from_extractor_config(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            # copy relevant files
            copyfile(SAMPLE_PROCESSOR_CONFIG, os.path.join(tmpdirname, FEATURE_EXTRACTOR_NAME))
            copyfile(SAMPLE_VOCAB, os.path.join(tmpdirname, "vocab.json"))

            processor = AutoProcessor.from_pretrained(tmpdirname)

        self.assertIsInstance(processor, Wav2Vec2Processor)

    def test_processor_from_feat_extr_processor_class(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            feature_extractor = Wav2Vec2FeatureExtractor()
            tokenizer = AutoTokenizer.from_pretrained("facebook/wav2vec2-base-960h")

            processor = Wav2Vec2Processor(feature_extractor, tokenizer)

            # save in new folder
            processor.save_pretrained(tmpdirname)

            # drop `processor_class` in tokenizer
            with open(os.path.join(tmpdirname, TOKENIZER_CONFIG_FILE), "r") as f:
                config_dict = json.load(f)
            config_dict.pop("processor_class")

            with open(os.path.join(tmpdirname, TOKENIZER_CONFIG_FILE), "w") as f:
                f.write(json.dumps(config_dict))

            processor = AutoProcessor.from_pretrained(tmpdirname)

        self.assertIsInstance(processor, Wav2Vec2Processor)

    def test_processor_from_tokenizer_processor_class(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            feature_extractor = Wav2Vec2FeatureExtractor()
            tokenizer = AutoTokenizer.from_pretrained("facebook/wav2vec2-base-960h")

            processor = Wav2Vec2Processor(feature_extractor, tokenizer)

            # save in new folder
            processor.save_pretrained(tmpdirname)

            # drop `processor_class` in feature extractor
            with open(os.path.join(tmpdirname, FEATURE_EXTRACTOR_NAME), "r") as f:
                config_dict = json.load(f)
            config_dict.pop("processor_class")

            with open(os.path.join(tmpdirname, FEATURE_EXTRACTOR_NAME), "w") as f:
                f.write(json.dumps(config_dict))

            processor = AutoProcessor.from_pretrained(tmpdirname)

        self.assertIsInstance(processor, Wav2Vec2Processor)

    def test_processor_from_local_directory_from_model_config(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = Wav2Vec2Config(processor_class="Wav2Vec2Processor")
            model_config.save_pretrained(tmpdirname)
            # copy relevant files
            copyfile(SAMPLE_VOCAB, os.path.join(tmpdirname, "vocab.json"))
            # create emtpy sample processor
            with open(os.path.join(tmpdirname, FEATURE_EXTRACTOR_NAME), "w") as f:
                f.write("{}")

            processor = AutoProcessor.from_pretrained(tmpdirname)

        self.assertIsInstance(processor, Wav2Vec2Processor)

    def test_from_pretrained_dynamic_processor(self):
        # If remote code is not trusted, loading this dynamic processor should raise.
        with self.assertRaises(ValueError):
            processor = AutoProcessor.from_pretrained("hf-internal-testing/test_dynamic_processor")
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            processor = AutoProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_processor", trust_remote_code=False
            )

        processor = AutoProcessor.from_pretrained("hf-internal-testing/test_dynamic_processor", trust_remote_code=True)
        self.assertTrue(processor.special_attribute_present)
        self.assertEqual(processor.__class__.__name__, "NewProcessor")

        feature_extractor = processor.feature_extractor
        self.assertTrue(feature_extractor.special_attribute_present)
        self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")

        tokenizer = processor.tokenizer
        self.assertTrue(tokenizer.special_attribute_present)
        if is_tokenizers_available():
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizerFast")

            # Test we can also load the slow version
            new_processor = AutoProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_processor", trust_remote_code=True, use_fast=False
            )
            new_tokenizer = new_processor.tokenizer
            self.assertTrue(new_tokenizer.special_attribute_present)
            self.assertEqual(new_tokenizer.__class__.__name__, "NewTokenizer")
        else:
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")

    def test_new_processor_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)
            AutoFeatureExtractor.register(CustomConfig, CustomFeatureExtractor)
            AutoTokenizer.register(CustomConfig, slow_tokenizer_class=CustomTokenizer)
            AutoProcessor.register(CustomConfig, CustomProcessor)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoProcessor.register(Wav2Vec2Config, Wav2Vec2Processor)

            # Now that the config is registered, it can be used as any other config with the auto-API
            feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_PROCESSOR_CONFIG_DIR)

            with tempfile.TemporaryDirectory() as tmp_dir:
                vocab_file = os.path.join(tmp_dir, "vocab.txt")
                with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                    vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
                tokenizer = CustomTokenizer(vocab_file)

            processor = CustomProcessor(feature_extractor, tokenizer)

            with tempfile.TemporaryDirectory() as tmp_dir:
                processor.save_pretrained(tmp_dir)
                new_processor = AutoProcessor.from_pretrained(tmp_dir)
                self.assertIsInstance(new_processor, CustomProcessor)

        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
            if CustomConfig in TOKENIZER_MAPPING._extra_content:
                del TOKENIZER_MAPPING._extra_content[CustomConfig]
            if CustomConfig in PROCESSOR_MAPPING._extra_content:
                del PROCESSOR_MAPPING._extra_content[CustomConfig]

    def test_from_pretrained_dynamic_processor_conflict(self):
        class NewFeatureExtractor(Wav2Vec2FeatureExtractor):
            special_attribute_present = False

        class NewTokenizer(BertTokenizer):
            special_attribute_present = False

        class NewProcessor(ProcessorMixin):
            feature_extractor_class = "AutoFeatureExtractor"
            tokenizer_class = "AutoTokenizer"
            special_attribute_present = False

        try:
            AutoConfig.register("custom", CustomConfig)
            AutoFeatureExtractor.register(CustomConfig, NewFeatureExtractor)
            AutoTokenizer.register(CustomConfig, slow_tokenizer_class=NewTokenizer)
            AutoProcessor.register(CustomConfig, NewProcessor)
            # If remote code is not set, the default is to use local classes.
            processor = AutoProcessor.from_pretrained("hf-internal-testing/test_dynamic_processor")
            self.assertEqual(processor.__class__.__name__, "NewProcessor")
            self.assertFalse(processor.special_attribute_present)
            self.assertFalse(processor.feature_extractor.special_attribute_present)
            self.assertFalse(processor.tokenizer.special_attribute_present)

            # If remote code is disabled, we load the local ones.
            processor = AutoProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_processor", trust_remote_code=False
            )
            self.assertEqual(processor.__class__.__name__, "NewProcessor")
            self.assertFalse(processor.special_attribute_present)
            self.assertFalse(processor.feature_extractor.special_attribute_present)
            self.assertFalse(processor.tokenizer.special_attribute_present)

            # If remote is enabled, we load from the Hub.
            processor = AutoProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_processor", trust_remote_code=True
            )
            self.assertEqual(processor.__class__.__name__, "NewProcessor")
            self.assertTrue(processor.special_attribute_present)
            self.assertTrue(processor.feature_extractor.special_attribute_present)
            self.assertTrue(processor.tokenizer.special_attribute_present)

        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
            if CustomConfig in TOKENIZER_MAPPING._extra_content:
                del TOKENIZER_MAPPING._extra_content[CustomConfig]
            if CustomConfig in PROCESSOR_MAPPING._extra_content:
                del PROCESSOR_MAPPING._extra_content[CustomConfig]

    def test_auto_processor_creates_tokenizer(self):
        processor = AutoProcessor.from_pretrained("hf-internal-testing/tiny-random-bert")
        self.assertEqual(processor.__class__.__name__, "BertTokenizerFast")

    def test_auto_processor_creates_image_processor(self):
        processor = AutoProcessor.from_pretrained("hf-internal-testing/tiny-random-convnext")
        self.assertEqual(processor.__class__.__name__, "ConvNextImageProcessor")


@is_staging_test
class ProcessorPushToHubTester(unittest.TestCase):
    vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]

    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-processor")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-processor-org")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-processor")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        processor = Wav2Vec2Processor.from_pretrained(SAMPLE_PROCESSOR_CONFIG_DIR)

        with tempfile.TemporaryDirectory() as tmp_dir:
            processor.save_pretrained(
                os.path.join(tmp_dir, "test-processor"), push_to_hub=True, use_auth_token=self._token
            )

            new_processor = Wav2Vec2Processor.from_pretrained(f"{USER}/test-processor")
            for k, v in processor.feature_extractor.__dict__.items():
                self.assertEqual(v, getattr(new_processor.feature_extractor, k))
            self.assertDictEqual(new_processor.tokenizer.get_vocab(), processor.tokenizer.get_vocab())

    def test_push_to_hub_in_organization(self):
        processor = Wav2Vec2Processor.from_pretrained(SAMPLE_PROCESSOR_CONFIG_DIR)

        with tempfile.TemporaryDirectory() as tmp_dir:
            processor.save_pretrained(
                os.path.join(tmp_dir, "test-processor-org"),
                push_to_hub=True,
                use_auth_token=self._token,
                organization="valid_org",
            )

            new_processor = Wav2Vec2Processor.from_pretrained("valid_org/test-processor-org")
            for k, v in processor.feature_extractor.__dict__.items():
                self.assertEqual(v, getattr(new_processor.feature_extractor, k))
            self.assertDictEqual(new_processor.tokenizer.get_vocab(), processor.tokenizer.get_vocab())

    def test_push_to_hub_dynamic_processor(self):
        CustomFeatureExtractor.register_for_auto_class()
        CustomTokenizer.register_for_auto_class()
        CustomProcessor.register_for_auto_class()

        feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_PROCESSOR_CONFIG_DIR)

        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, "vocab.txt")
            with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
            tokenizer = CustomTokenizer(vocab_file)

        processor = CustomProcessor(feature_extractor, tokenizer)

        with tempfile.TemporaryDirectory() as tmp_dir:
            create_repo(f"{USER}/test-dynamic-processor", token=self._token)
            repo = Repository(tmp_dir, clone_from=f"{USER}/test-dynamic-processor", token=self._token)
            processor.save_pretrained(tmp_dir)

            # This has added the proper auto_map field to the feature extractor config
            self.assertDictEqual(
                processor.feature_extractor.auto_map,
                {
                    "AutoFeatureExtractor": "custom_feature_extraction.CustomFeatureExtractor",
                    "AutoProcessor": "custom_processing.CustomProcessor",
                },
            )

            # This has added the proper auto_map field to the tokenizer config
            with open(os.path.join(tmp_dir, "tokenizer_config.json")) as f:
                tokenizer_config = json.load(f)
            self.assertDictEqual(
                tokenizer_config["auto_map"],
                {
                    "AutoTokenizer": ["custom_tokenization.CustomTokenizer", None],
                    "AutoProcessor": "custom_processing.CustomProcessor",
                },
            )

            # The code has been copied from fixtures
            self.assertTrue(os.path.isfile(os.path.join(tmp_dir, "custom_feature_extraction.py")))
            self.assertTrue(os.path.isfile(os.path.join(tmp_dir, "custom_tokenization.py")))
            self.assertTrue(os.path.isfile(os.path.join(tmp_dir, "custom_processing.py")))

            repo.push_to_hub()

            new_processor = AutoProcessor.from_pretrained(f"{USER}/test-dynamic-processor", trust_remote_code=True)
            # Can't make an isinstance check because the new_processor is from the CustomProcessor class of a dynamic module
            self.assertEqual(new_processor.__class__.__name__, "CustomProcessor")
| 59 |
'''simple docstring'''
from __future__ import annotations
import math
def default_matrix_multiplication(a: list, b: list) -> list:
    """Multiplication only for 2x2 matrices."""
    if len(a) != 2 or len(a[0]) != 2 or len(b) != 2 or len(b[0]) != 2:
        raise Exception("Matrices are not 2x2")
    new_matrix = [
        [a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]],
        [a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]],
    ]
    return new_matrix


def matrix_addition(matrix_a: list, matrix_b: list):
    return [
        [matrix_a[row][col] + matrix_b[row][col] for col in range(len(matrix_a[row]))]
        for row in range(len(matrix_a))
    ]


def matrix_subtraction(matrix_a: list, matrix_b: list):
    return [
        [matrix_a[row][col] - matrix_b[row][col] for col in range(len(matrix_a[row]))]
        for row in range(len(matrix_a))
    ]


def split_matrix(a: list) -> tuple[list, list, list, list]:
    if len(a) % 2 != 0 or len(a[0]) % 2 != 0:
        raise Exception("Odd matrices are not supported!")

    matrix_length = len(a)
    mid = matrix_length // 2

    top_right = [[a[i][j] for j in range(mid, matrix_length)] for i in range(mid)]
    bot_right = [
        [a[i][j] for j in range(mid, matrix_length)] for i in range(mid, matrix_length)
    ]
    top_left = [[a[i][j] for j in range(mid)] for i in range(mid)]
    bot_left = [[a[i][j] for j in range(mid)] for i in range(mid, matrix_length)]

    return top_left, top_right, bot_left, bot_right


def matrix_dimensions(matrix: list) -> tuple[int, int]:
    return len(matrix), len(matrix[0])


def print_matrix(matrix: list) -> None:
    print("\n".join(str(line) for line in matrix))


def actual_strassen(matrix_a: list, matrix_b: list) -> list:
    if matrix_dimensions(matrix_a) == (2, 2):
        return default_matrix_multiplication(matrix_a, matrix_b)

    a, b, c, d = split_matrix(matrix_a)
    e, f, g, h = split_matrix(matrix_b)

    t1 = actual_strassen(a, matrix_subtraction(f, h))
    t2 = actual_strassen(matrix_addition(a, b), h)
    t3 = actual_strassen(matrix_addition(c, d), e)
    t4 = actual_strassen(d, matrix_subtraction(g, e))
    t5 = actual_strassen(matrix_addition(a, d), matrix_addition(e, h))
    t6 = actual_strassen(matrix_subtraction(b, d), matrix_addition(g, h))
    t7 = actual_strassen(matrix_subtraction(a, c), matrix_addition(e, f))

    top_left = matrix_addition(matrix_subtraction(matrix_addition(t5, t4), t2), t6)
    top_right = matrix_addition(t1, t2)
    bot_left = matrix_addition(t3, t4)
    bot_right = matrix_subtraction(matrix_subtraction(matrix_addition(t1, t5), t3), t7)

    # construct the new matrix from our 4 quadrants
    new_matrix = []
    for i in range(len(top_right)):
        new_matrix.append(top_left[i] + top_right[i])
    for i in range(len(bot_right)):
        new_matrix.append(bot_left[i] + bot_right[i])
    return new_matrix


def strassen(matrix1: list, matrix2: list) -> list:
    if matrix_dimensions(matrix1)[1] != matrix_dimensions(matrix2)[0]:
        msg = (
            "Unable to multiply these matrices, please check the dimensions.\n"
            f"Matrix A: {matrix1}\n"
            f"Matrix B: {matrix2}"
        )
        raise Exception(msg)
    dimension1 = matrix_dimensions(matrix1)
    dimension2 = matrix_dimensions(matrix2)

    if dimension1[0] == dimension1[1] and dimension2[0] == dimension2[1]:
        return [matrix1, matrix2]

    maximum = max(*dimension1, *dimension2)
    maxim = int(math.pow(2, math.ceil(math.log2(maximum))))
    new_matrix1 = matrix1
    new_matrix2 = matrix2

    # Adding zeros to the matrices so that the arrays dimensions are the same and also
    # power of 2
    for i in range(0, maxim):
        if i < dimension1[0]:
            for _ in range(dimension1[1], maxim):
                new_matrix1[i].append(0)
        else:
            new_matrix1.append([0] * maxim)
        if i < dimension2[0]:
            for _ in range(dimension2[1], maxim):
                new_matrix2[i].append(0)
        else:
            new_matrix2.append([0] * maxim)

    final_matrix = actual_strassen(new_matrix1, new_matrix2)

    # Removing the additional zeros
    for i in range(0, maxim):
        if i < dimension1[0]:
            for _ in range(dimension2[1], maxim):
                final_matrix[i].pop()
        else:
            final_matrix.pop()
    return final_matrix


if __name__ == "__main__":
    matrix1 = [
        [2, 3, 4, 5],
        [6, 4, 3, 1],
        [2, 3, 6, 7],
        [3, 1, 2, 4],
        [2, 3, 4, 5],
        [6, 4, 3, 1],
        [2, 3, 6, 7],
        [3, 1, 2, 4],
        [2, 3, 4, 5],
        [6, 2, 3, 1],
    ]
    matrix2 = [[0, 2, 1, 1], [16, 2, 3, 3], [2, 2, 7, 7], [13, 11, 22, 4]]
    print(strassen(matrix1, matrix2))
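
    # Illustrative check (added): the 2x2 base case is the naive product, e.g.
    #   actual_strassen([[1, 2], [3, 4]], [[5, 6], [7, 8]]) == [[19, 22], [43, 50]]
    # Note that strassen() itself short-circuits when both inputs are square,
    # returning the inputs unchanged — a quirk kept from the source.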
| 319 | 0 |
"""simple docstring"""
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
def calculate_prob(text: str) -> None:
    """
    Print the first- and second-order Shannon entropies of `text`,
    plus the difference between them.
    """
    single_char_strings, two_char_strings = analyze_text(text)
    my_alphas = list(" " + ascii_lowercase)
    # what is our total sum of probabilities.
    all_sum = sum(single_char_strings.values())

    # one length string
    my_fir_sum = 0
    # for each alpha we go in our dict and if it is in it we calculate entropy
    for ch in my_alphas:
        if ch in single_char_strings:
            my_str = single_char_strings[ch]
            prob = my_str / all_sum
            my_fir_sum += prob * math.log2(prob)  # entropy formula.

    # print entropy
    print(f"{round(-1 * my_fir_sum):.1f}")

    # two len string
    all_sum = sum(two_char_strings.values())
    my_sec_sum = 0
    # for each alpha (two in size) calculate entropy.
    for ch0 in my_alphas:
        for ch1 in my_alphas:
            sequence = ch0 + ch1
            if sequence in two_char_strings:
                my_str = two_char_strings[sequence]
                prob = int(my_str) / all_sum
                my_sec_sum += prob * math.log2(prob)

    # print second entropy
    print(f"{round(-1 * my_sec_sum):.1f}")

    # print the difference between them
    print(f"{round((-1 * my_sec_sum) - (-1 * my_fir_sum)):.1f}")


def analyze_text(text: str) -> tuple[dict, dict]:
    """
    Convert text input into two dicts of counts: frequencies of single
    characters and of two-character strings.
    """
    single_char_strings = Counter()  # type: ignore
    two_char_strings = Counter()  # type: ignore
    single_char_strings[text[-1]] += 1

    # first case when we have space at start.
    two_char_strings[" " + text[0]] += 1
    for i in range(0, len(text) - 1):
        single_char_strings[text[i]] += 1
        two_char_strings[text[i : i + 2]] += 1
    return single_char_strings, two_char_strings


def main():
    import doctest

    doctest.testmod()
    # text = (
    #     "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
    #     "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
    #     "jointure saw horrible. He private he on be imagine suppose. Fertile "
    #     "beloved evident through no service elderly is. Blind there if every no so "
    #     "at. Own neglected you preferred way sincerity delivered his attempted. To "
    #     "of message cottage windows do besides against uncivil. Delightful "
    #     "unreserved impossible few estimating men favourable see entreaties. She "
    #     "propriety immediate was improving. He or entrance humoured likewise "
    #     "moderate. Much nor game son say feel. Fat make met can must form into "
    #     "gate. Me we offending prevailed discovery. "
    # )
    # calculate_prob(text)


if __name__ == "__main__":
    main()
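
    # Worked example (added): for the text "ab" each character occurs with
    # probability 1/2, so the first-order entropy is
    # -(0.5 * log2(0.5) + 0.5 * log2(0.5)) = 1.0 bit.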
| 60 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
class KarrasVeOutput(BaseOutput):
    prev_sample: torch.FloatTensor
    derivative: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None


class KarrasVeScheduler(SchedulerMixin, ConfigMixin):
    order = 2

    @register_to_config
    def __init__(
        self,
        sigma_min: float = 0.02,
        sigma_max: float = 100,
        s_noise: float = 1.007,
        s_churn: float = 80,
        s_min: float = 0.05,
        s_max: float = 50,
    ):
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max

        # setable values
        self.num_inference_steps: int = None
        self.timesteps: np.IntTensor = None
        self.schedule: torch.FloatTensor = None  # sigma(t_i)

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        return sample

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        self.num_inference_steps = num_inference_steps
        timesteps = np.arange(0, self.num_inference_steps)[::-1].copy()
        self.timesteps = torch.from_numpy(timesteps).to(device)
        schedule = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in self.timesteps
        ]
        self.schedule = torch.tensor(schedule, dtype=torch.float32, device=device)

    def add_noise_to_input(
        self, sample: torch.FloatTensor, sigma: float, generator: Optional[torch.Generator] = None
    ) -> Tuple[torch.FloatTensor, float]:
        if self.config.s_min <= sigma <= self.config.s_max:
            gamma = min(self.config.s_churn / self.num_inference_steps, 2**0.5 - 1)
        else:
            gamma = 0

        # sample eps ~ N(0, S_noise^2 * I)
        eps = self.config.s_noise * randn_tensor(sample.shape, generator=generator).to(sample.device)
        sigma_hat = sigma + gamma * sigma
        sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)

        return sample_hat, sigma_hat

    def step(
        self,
        model_output: torch.FloatTensor,
        sigma_hat: float,
        sigma_prev: float,
        sample_hat: torch.FloatTensor,
        return_dict: bool = True,
    ) -> Union[KarrasVeOutput, Tuple]:
        pred_original_sample = sample_hat + sigma_hat * model_output
        derivative = (sample_hat - pred_original_sample) / sigma_hat
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative

        if not return_dict:
            return (sample_prev, derivative)

        return KarrasVeOutput(
            prev_sample=sample_prev, derivative=derivative, pred_original_sample=pred_original_sample
        )

    def step_correct(
        self,
        model_output: torch.FloatTensor,
        sigma_hat: float,
        sigma_prev: float,
        sample_hat: torch.FloatTensor,
        sample_prev: torch.FloatTensor,
        derivative: torch.FloatTensor,
        return_dict: bool = True,
    ) -> Union[KarrasVeOutput, Tuple]:
        pred_original_sample = sample_prev + sigma_prev * model_output
        derivative_corr = (sample_prev - pred_original_sample) / sigma_prev
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)

        if not return_dict:
            return (sample_prev, derivative)

        return KarrasVeOutput(
            prev_sample=sample_prev, derivative=derivative, pred_original_sample=pred_original_sample
        )

    def add_noise(self, original_samples, noise, timesteps):
        raise NotImplementedError()
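

# Minimal usage sketch (added for illustration; `denoise` is a hypothetical
# model call, not part of this file — mirrors the stochastic-churn-plus-Heun
# loop from Karras et al.):
#
#   scheduler = KarrasVeScheduler()
#   scheduler.set_timesteps(50)
#   sample = torch.randn(1, 3, 64, 64) * scheduler.init_noise_sigma
#   for t in scheduler.timesteps:
#       sigma = scheduler.schedule[t]
#       sigma_prev = scheduler.schedule[t - 1] if t > 0 else 0
#       sample_hat, sigma_hat = scheduler.add_noise_to_input(sample, sigma)
#       model_output = denoise(sample_hat, sigma_hat)
#       step_output = scheduler.step(model_output, sigma_hat, sigma_prev, sample_hat)
#       sample = step_output.prev_sample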
| 319 | 0 |
"""simple docstring"""
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class DetaImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
    def get_expected_values(self, image_inputs, batched=False):
        """Computes the expected (height, width) after DetaImageProcessor resizing with the shortest-edge rule."""
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
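# Worked example of the shortest-edge rule above (added for illustration; the
# helper and values below are hypothetical, not part of the original tests).
def _shortest_edge_resize(height: int, width: int, shortest_edge: int) -> tuple:
    """Re-implements the resize arithmetic from get_expected_values."""
    if width < height:
        return int(shortest_edge * height / width), shortest_edge
    if width > height:
        return shortest_edge, int(shortest_edge * width / height)
    return shortest_edge, shortest_edge


# A 400x600 (h x w) image with shortest_edge=18 maps to 18x27: the short side
# is pinned to 18 and the aspect ratio is preserved.
assert _shortest_edge_resize(400, 600, 18) == (18, 27)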
@require_torch
@require_vision
class DetaImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DetaImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DetaImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"image_id": 39769, "annotations": target}

        # encode them
        image_processing = DetaImageProcessor()
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}

        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = DetaImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
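# For reference (hypothetical values, added for illustration): the two @slow
# tests above feed COCO-style targets to the image processor --
#
#     detection: {"image_id": 39769, "annotations": [
#         {"bbox": [x, y, w, h], "category_id": 75, "area": 600.0, "iscrowd": 0}, ...]}
#     panoptic:  {"file_name": "000000039769.png", "image_id": 39769,
#                 "segments_info": [{"id": 1, "category_id": 17, ...}]}
#
# plus, for the panoptic case, a `masks_path` directory holding the PNG masks.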
| 61 |
'''simple docstring'''
import json
import logging
import math
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from datasets import Dataset, load_dataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_FOR_MASKED_LM_MAPPING,
AutoConfig,
AutoModelForMaskedLM,
AutoTokenizer,
DataCollatorForWholeWordMask,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
logger = logging.getLogger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_FOR_MASKED_LM_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch.
    """

    model_name_or_path: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization.Don't set if you want to train a model from scratch."
            )
        },
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_overrides: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "Override some existing default config settings when a model is trained from scratch. Example: "
                "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
            )
        },
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )

    def __post_init__(self):
        if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None):
            raise ValueError(
                "--config_overrides can't be used in combination with --config_name or --model_name_or_path"
            )
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    dataset_name: Optional[str] = field(
        default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."})
    validation_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
    )
    train_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input train ref data file for whole word masking in Chinese."},
    )
    validation_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input validation ref data file for whole word masking in Chinese."},
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    validation_split_percentage: Optional[int] = field(
        default=5,
        metadata={
            "help": "The percentage of the train set used as validation set in case there's no validation split"
        },
    )
    max_seq_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated. Default to the max input length of the model."
            )
        },
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    mlm_probability: float = field(
        default=0.15, metadata={"help": "Ratio of tokens to mask for masked language modeling loss"}
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        },
    )

    def __post_init__(self):
        if self.train_file is not None:
            extension = self.train_file.split(".")[-1]
            assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file."
        if self.validation_file is not None:
            extension = self.validation_file.split(".")[-1]
            assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file."
def add_chinese_references(dataset, ref_file):
    with open(ref_file, "r", encoding="utf-8") as f:
        refs = [json.loads(line) for line in f.read().splitlines() if (len(line) > 0 and not line.isspace())]
    assert len(dataset) == len(refs)

    dataset_dict = {c: dataset[c] for c in dataset.column_names}
    dataset_dict["chinese_ref"] = refs
    return Dataset.from_dict(dataset_dict)
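# Illustration (hypothetical data, added for clarity): each line of a ref file
# is a JSON-encoded list of sub-token positions that continue a Chinese word,
# so a row tokenized as ["[CLS]", "我", "喜", "欢", "[SEP]"] might pair with the
# line "[3]" to mark "欢" as the continuation of "喜欢". After the call above
# the dataset gains a "chinese_ref" column that DataCollatorForWholeWordMask
# uses to mask whole words instead of isolated sub-tokens.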
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None:
logger.info(
F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN )
# Log on each process the small summary:
logger.warning(
F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
    logger.info('''Training/evaluation parameters %s''' , training_args )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
    if data_args.dataset_name is not None:
        # Downloading and loading a dataset from the hub.
        datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name)
        if "validation" not in datasets.keys():
            datasets["validation"] = load_dataset(
                data_args.dataset_name,
                data_args.dataset_config_name,
                split=f"train[:{data_args.validation_split_percentage}%]",
            )
            datasets["train"] = load_dataset(
                data_args.dataset_name,
                data_args.dataset_config_name,
                split=f"train[{data_args.validation_split_percentage}%:]",
            )
    else:
        data_files = {}
        if data_args.train_file is not None:
            data_files["train"] = data_args.train_file
        if data_args.validation_file is not None:
            data_files["validation"] = data_args.validation_file
        extension = data_args.train_file.split(".")[-1]
        if extension == "txt":
            extension = "text"
        datasets = load_dataset(extension, data_files=data_files)
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config_kwargs = {
        "cache_dir": model_args.cache_dir,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.config_name:
        config = AutoConfig.from_pretrained(model_args.config_name, **config_kwargs)
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning("You are instantiating a new config instance from scratch.")
        if model_args.config_overrides is not None:
            logger.info(f"Overriding config: {model_args.config_overrides}")
            config.update_from_string(model_args.config_overrides)
            logger.info(f"New config: {config}")

    tokenizer_kwargs = {
        "cache_dir": model_args.cache_dir,
        "use_fast": model_args.use_fast_tokenizer,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.tokenizer_name:
        tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, **tokenizer_kwargs)
    elif model_args.model_name_or_path:
        tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, **tokenizer_kwargs)
    else:
        raise ValueError(
            "You are instantiating a new tokenizer from scratch. This is not supported by this script."
            "You can do it from another script, save it, and load it from here, using --tokenizer_name."
        )

    if model_args.model_name_or_path:
        model = AutoModelForMaskedLM.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool(".ckpt" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
            revision=model_args.model_revision,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        logger.info("Training new model from scratch")
        model = AutoModelForMaskedLM.from_config(config)

    model.resize_token_embeddings(len(tokenizer))
# Preprocessing the datasets.
# First we tokenize all the texts.
    if training_args.do_train:
        column_names = datasets["train"].column_names
    else:
        column_names = datasets["validation"].column_names
    text_column_name = "text" if "text" in column_names else column_names[0]

    padding = "max_length" if data_args.pad_to_max_length else False

    def tokenize_function(examples):
        # Remove empty lines
        examples["text"] = [line for line in examples["text"] if len(line) > 0 and not line.isspace()]
        return tokenizer(examples["text"], padding=padding, truncation=True, max_length=data_args.max_seq_length)

    tokenized_datasets = datasets.map(
        tokenize_function,
        batched=True,
        num_proc=data_args.preprocessing_num_workers,
        remove_columns=[text_column_name],
        load_from_cache_file=not data_args.overwrite_cache,
    )
# Add the chinese references if provided
if data_args.train_ref_file is not None:
A: List[str] = add_chinese_references(tokenized_datasets['''train'''] , data_args.train_ref_file )
if data_args.validation_ref_file is not None:
A: Dict = add_chinese_references(
tokenized_datasets['''validation'''] , data_args.validation_ref_file )
# If we have ref files, need to avoid it removed by trainer
A: Optional[Any] = data_args.train_ref_file or data_args.validation_ref_file
if has_ref:
A: List[Any] = False
# Data collator
# This one will take care of randomly masking the tokens.
A: Optional[Any] = DataCollatorForWholeWordMask(tokenizer=__lowercase , mlm_probability=data_args.mlm_probability )
# Initialize our Trainer
A: Optional[int] = Trainer(
model=__lowercase , args=__lowercase , train_dataset=tokenized_datasets['''train'''] if training_args.do_train else None , eval_dataset=tokenized_datasets['''validation'''] if training_args.do_eval else None , tokenizer=__lowercase , data_collator=__lowercase , )
# Training
    if training_args.do_train:
        if last_checkpoint is not None:
            checkpoint = last_checkpoint
        elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path):
            checkpoint = model_args.model_name_or_path
        else:
            checkpoint = None
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()  # Saves the tokenizer too for easy upload

        output_train_file = os.path.join(training_args.output_dir, "train_results.txt")
        if trainer.is_world_process_zero():
            with open(output_train_file, "w") as writer:
                logger.info("***** Train results *****")
                for key, value in sorted(train_result.metrics.items()):
                    logger.info(f"  {key} = {value}")
                    writer.write(f"{key} = {value}\n")

            # Need to save the state, since Trainer.save_model saves only the tokenizer with the model
            trainer.state.save_to_json(os.path.join(training_args.output_dir, "trainer_state.json"))
# Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        eval_output = trainer.evaluate()

        perplexity = math.exp(eval_output["eval_loss"])
        results["perplexity"] = perplexity

        output_eval_file = os.path.join(training_args.output_dir, "eval_results_mlm_wwm.txt")
        if trainer.is_world_process_zero():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in sorted(results.items()):
                    logger.info(f"  {key} = {value}")
                    writer.write(f"{key} = {value}\n")

    return results
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
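# Example invocation (hypothetical paths; the script is conventionally saved
# as run_mlm_wwm.py):
#
#   python run_mlm_wwm.py \
#       --model_name_or_path bert-base-chinese \
#       --train_file ./train.txt \
#       --train_ref_file ./train_ref.txt \
#       --do_train --do_eval \
#       --output_dir /tmp/test-mlm-wwm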
| 319 | 0 |
import csv
from collections import defaultdict
from dataclasses import dataclass, field
from typing import List, Optional
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.ticker import ScalarFormatter
from transformers import HfArgumentParser
def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)
@dataclass
class PlotArguments:
    csv_file: str = field(
        metadata={"help": "The csv file to plot."},
    )
    plot_along_batch: bool = field(
        default=False,
        metadata={"help": "Whether to plot along batch size or sequence length. Defaults to sequence length."},
    )
    is_time: bool = field(
        default=False,
        metadata={"help": "Whether the csv file has time results or memory results. Defaults to memory results."},
    )
    no_log_scale: bool = field(
        default=False,
        metadata={"help": "Disable logarithmic scale when plotting"},
    )
    is_train: bool = field(
        default=False,
        metadata={
            "help": "Whether the csv file has training results or inference results. Defaults to inference results."
        },
    )
    figure_png_file: Optional[str] = field(
        default=None,
        metadata={"help": "Filename under which the plot will be saved. If unused no plot is saved."},
    )
    short_model_names: Optional[List[str]] = list_field(
        default=None, metadata={"help": "List of model names that are used instead of the ones in the csv file."}
    )
def can_convert_to_int(string):
    try:
        int(string)
        return True
    except ValueError:
        return False


def can_convert_to_float(string):
    try:
        float(string)
        return True
    except ValueError:
        return False
class Plot:
    def __init__(self, args):
        self.args = args
        self.result_dict = defaultdict(lambda: {"bsz": [], "seq_len": [], "result": {}})

        with open(self.args.csv_file, newline="") as csv_file:
            reader = csv.DictReader(csv_file)
            for row in reader:
                model_name = row["model"]
                self.result_dict[model_name]["bsz"].append(int(row["batch_size"]))
                self.result_dict[model_name]["seq_len"].append(int(row["sequence_length"]))
                if can_convert_to_int(row["result"]):
                    # value is not None
                    self.result_dict[model_name]["result"][
                        (int(row["batch_size"]), int(row["sequence_length"]))
                    ] = int(row["result"])
                elif can_convert_to_float(row["result"]):
                    # value is not None
                    self.result_dict[model_name]["result"][
                        (int(row["batch_size"]), int(row["sequence_length"]))
                    ] = float(row["result"])
    def plot(self):
        fig, ax = plt.subplots()
        title_str = "Time usage" if self.args.is_time else "Memory usage"
        title_str = title_str + " for training" if self.args.is_train else title_str + " for inference"

        if not self.args.no_log_scale:
            # set logarithm scales
            ax.set_xscale("log")
            ax.set_yscale("log")

        for axis in [ax.xaxis, ax.yaxis]:
            axis.set_major_formatter(ScalarFormatter())

        for model_name_idx, model_name in enumerate(self.result_dict.keys()):
            batch_sizes = sorted(set(self.result_dict[model_name]["bsz"]))
            sequence_lengths = sorted(set(self.result_dict[model_name]["seq_len"]))
            results = self.result_dict[model_name]["result"]

            (x_axis_array, inner_loop_array) = (
                (batch_sizes, sequence_lengths) if self.args.plot_along_batch else (sequence_lengths, batch_sizes)
            )

            label_model_name = (
                model_name if self.args.short_model_names is None else self.args.short_model_names[model_name_idx]
            )

            for inner_loop_value in inner_loop_array:
                if self.args.plot_along_batch:
                    y_axis_array = np.asarray(
                        [results[(x, inner_loop_value)] for x in x_axis_array if (x, inner_loop_value) in results],
                        dtype=int,
                    )
                else:
                    y_axis_array = np.asarray(
                        [results[(inner_loop_value, x)] for x in x_axis_array if (inner_loop_value, x) in results],
                        dtype=np.float32,
                    )

                (x_axis_label, inner_loop_label) = (
                    ("batch_size", "len") if self.args.plot_along_batch else ("in #tokens", "bsz")
                )

                x_axis_array = np.asarray(x_axis_array, int)[: len(y_axis_array)]
                plt.scatter(
                    x_axis_array, y_axis_array, label=f"{label_model_name} - {inner_loop_label}: {inner_loop_value}"
                )
                plt.plot(x_axis_array, y_axis_array, "--")

            title_str += f" {label_model_name} vs."

        title_str = title_str[:-4]
        y_axis_label = "Time in s" if self.args.is_time else "Memory in MB"

        # plot
        plt.title(title_str)
        plt.xlabel(x_axis_label)
        plt.ylabel(y_axis_label)
        plt.legend()

        if self.args.figure_png_file is not None:
            plt.savefig(self.args.figure_png_file)
        else:
            plt.show()
def main():
    parser = HfArgumentParser(PlotArguments)
    plot_args = parser.parse_args_into_dataclasses()[0]
    plot = Plot(args=plot_args)
    plot.plot()
if __name__ == "__main__":
main()
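# Example (hypothetical file names, added for illustration): the csv consumed
# above is the --save_to_csv output of the transformers benchmark scripts and
# has the columns model,batch_size,sequence_length,result. A typical run:
#
#   python plot_csv_file.py --csv_file results.csv --figure_png_file plot.png --is_time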
| 62 |
'''simple docstring'''
import json
import os
import unittest
from typing import Tuple
from transformers import Wav2Vec2PhonemeCTCTokenizer
from transformers.models.wav2vec2.tokenization_wav2vec2 import VOCAB_FILES_NAMES
from transformers.models.wav2vec2_phoneme.tokenization_wav2vec2_phoneme import Wav2Vec2PhonemeCTCTokenizerOutput
from transformers.testing_utils import require_phonemizer
from ...test_tokenization_common import TokenizerTesterMixin
@require_phonemizer
class Wav2Vec2PhonemeCTCTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = Wav2Vec2PhonemeCTCTokenizer
    test_rust_tokenizer = False
    def setUp(self):
        super().setUp()

        vocab = (
'''<s> <pad> </s> <unk> n s t ə l a i k d m ɛ ɾ e ɪ p o ɐ z ð f j v b ɹ ʁ ʊ iː r w ʌ u ɡ æ aɪ ʃ h ɔ ɑː '''
'''ŋ ɚ eɪ β uː y ɑ̃ oʊ ᵻ eː θ aʊ ts oː ɔ̃ ɣ ɜ ɑ dʒ əl x ɜː ç ʒ tʃ ɔː ɑːɹ ɛ̃ ʎ ɔːɹ ʋ aː ɕ œ ø oːɹ ɲ yː '''
'''ʔ iə i5 s. tɕ ?? nʲ ɛː œ̃ ɭ ɔø ʑ tʲ ɨ ɛɹ ts. rʲ ɪɹ ɭʲ i.5 ɔɪ q sʲ u5 ʊɹ iɜ a5 iɛ5 øː ʕ ja əɜ th ɑ5 '''
'''oɪ dʲ ə5 tɕh ts.h mʲ ɯ dʑ vʲ e̞ tʃʲ ei5 o5 onɡ5 ɑu5 iɑ5 ai5 aɪɚ kh ə1 ʐ i2 ʉ ħ t[ aɪə ʲ ju ə2 u2 oɜ '''
'''pː iɛɜ ou5 y5 uɜ tː uo5 d[ uoɜ tsh ɑɜ ɵ i̪5 uei5 ɟ aɜ ɑɨ i.ɜ eʊ o2 ɐ̃ ä pʲ kʲ n̩ ɒ ph ɑu2 uɨ əɪ ɫ ɬ '''
'''yɜ bʲ ɑ2 s̪ aiɜ χ ɐ̃ʊ̃ 1 ə4 yæɜ a2 ɨː t̪ iouɜ ũ onɡɜ aɨ iɛ2 ɔɨ ɑuɜ o̞ ei2 iou2 c kː y2 ɖ oe dˤ yɛɜ '''
'''əʊ S ɡʲ onɡ2 u" eiɜ ʈ ɯᵝ iou5 dZ r̝̊ i.2 tS s^ ʝ yə5 iɑɜ uə5 pf ɨu iɑ2 ou2 ər2 fʲ ai2 r̝ uəɜ ɳ əɨ '''
'''ua5 uɪ ɽ bː yu5 uo2 yɛ5 l̩ ɻ ərɜ ʂ i̪2 ouɜ uaɜ a. a.ː yæ5 dː r̩ ee ɪu ər5 i̪ ɜ æi u: i.ː t^ o1 ɪ^ '''
'''ai ueiɜ æː ɛɪ eə i. ɴ ie ua2 ɑ1 o4 tʃː o: ɑ: u1 N i̪1 au yæ2 u. qː yəɜ y: kʰ tʃʰ iʊ sx õ uo tʰ '''
'''uai5 bʰ u.ː uə2 ʊə d^ s̪ː yiɜ dʰ r. oe: i1 ɟː yu2 nʲʲ i̪4 uei2 tsʲ ɸ ĩ ɑ4 t̪ː eɑ u4 e: tsː ʈʰ ɡʰ '''
'''ɯɯ dʒʲ ʂʲ X ɵː uaiɜ tɕʲ ã t^ː ẽː yɛ2 cː i.1 ɛʊ dˤdˤ dʒː i4 ɡː yi ɕʲ ɟʰ pʰ dʑʲ yuɜ ua1 ua4 æiː ɐɐ '''
'''ui iou1 ʊː a1 iou4 cʰ iɛ1 yə2 ɖʰ ẽ ʒʲ ää ər4 iːː ɪː iɑ1 ər1 œː øi ɪuː cʰcʰ əː1 iː1 ũ kʰː o̞o̞ xʲ '''
'''ou1 iɛ4 e̞e̞ y1 dzː dʲʲ dʰː ɯᵝɯᵝ lː uo1 i.4 i: yɛ5ʲ a4'''
).split(''' ''' )
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        self.special_tokens_map = {"pad_token": "<pad>", "unk_token": "<unk>", "bos_token": "<s>", "eos_token": "</s>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
def _snake_case ( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Tuple=False , SCREAMING_SNAKE_CASE_ : Any=20 , SCREAMING_SNAKE_CASE_ : Optional[int]=5 ) -> Tuple[str, list]:
'''simple docstring'''
A: int = [(i, tokenizer.decode([i] , clean_up_tokenization_spaces=SCREAMING_SNAKE_CASE_ )) for i in range(len(SCREAMING_SNAKE_CASE_ ) )]
A: Optional[Any] = list(filter(lambda SCREAMING_SNAKE_CASE_ : [t[0]] == tokenizer.encode(t[1] , do_phonemize=SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ ) )
if max_length is not None and len(SCREAMING_SNAKE_CASE_ ) > max_length:
A: int = toks[:max_length]
if min_length is not None and len(SCREAMING_SNAKE_CASE_ ) < min_length and len(SCREAMING_SNAKE_CASE_ ) > 0:
while len(SCREAMING_SNAKE_CASE_ ) < min_length:
A: Dict = toks + toks
# toks_str = [t[1] for t in toks]
A: Union[str, Any] = [t[0] for t in toks]
# Ensure consistency
A: List[str] = tokenizer.decode(SCREAMING_SNAKE_CASE_ , clean_up_tokenization_spaces=SCREAMING_SNAKE_CASE_ )
if " " not in output_txt and len(SCREAMING_SNAKE_CASE_ ) > 1:
A: int = (
tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=SCREAMING_SNAKE_CASE_ )
+ ''' '''
+ tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=SCREAMING_SNAKE_CASE_ )
)
if with_prefix_space:
A: Tuple = ''' ''' + output_txt
A: List[str] = tokenizer.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ )
return output_txt, output_ids
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return Wav2Vec2PhonemeCTCTokenizer.from_pretrained(self.tmpdirname, **kwargs)
def _snake_case ( self : int ) -> Optional[Any]:
'''simple docstring'''
A: List[Any] = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' )
# check adding a single token
tokenizer.add_tokens('''xxx''' )
A: Any = tokenizer('''m xxx ɪ''' , do_phonemize=SCREAMING_SNAKE_CASE_ ).input_ids
self.assertEqual(SCREAMING_SNAKE_CASE_ , [13, 3_92, 17] ) # xxx should be last token
tokenizer.add_tokens(['''aaa''', '''bbb''', '''ccc'''] )
A: Optional[int] = tokenizer('''m aaa ɪ ccc''' , do_phonemize=SCREAMING_SNAKE_CASE_ ).input_ids
self.assertEqual(SCREAMING_SNAKE_CASE_ , [13, 3_93, 17, 3_95] ) # aaa and ccc should be after xxx and 2 after aaa
A: str = tokenizer('''maɪ c''' , do_phonemize=SCREAMING_SNAKE_CASE_ ).input_ids
self.assertEqual(SCREAMING_SNAKE_CASE_ , [3, 2_00] ) # mai should be <unk> (=3)
def _snake_case ( self : int ) -> List[Any]:
'''simple docstring'''
A: Any = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' )
A: Any = '''Hello how are you'''
A: Optional[Any] = tokenizer.phonemize(SCREAMING_SNAKE_CASE_ , phonemizer_lang='''en-us''' )
self.assertEqual(SCREAMING_SNAKE_CASE_ , '''h ə l oʊ h aʊ ɑːɹ j uː''' )
def _snake_case ( self : Tuple ) -> Dict:
'''simple docstring'''
A: str = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' )
A: List[Any] = '''Hello how are you'''
A: Any = tokenizer.phonemize(SCREAMING_SNAKE_CASE_ , phonemizer_lang='''en-us''' )
self.assertEqual(tokenizer(SCREAMING_SNAKE_CASE_ ).input_ids , tokenizer(SCREAMING_SNAKE_CASE_ , do_phonemize=SCREAMING_SNAKE_CASE_ ).input_ids )
def _snake_case ( self : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
A: str = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' )
A: List[str] = '''Hello how are you'''
A: Union[str, Any] = tokenizer.phonemize(SCREAMING_SNAKE_CASE_ , phonemizer_lang='''en-us''' )
A: Union[str, Any] = tokenizer.decode(tokenizer(SCREAMING_SNAKE_CASE_ ).input_ids )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def _snake_case ( self : Dict ) -> Optional[Any]:
'''simple docstring'''
A: Dict = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' )
A: Optional[Any] = [
[11, 5, 15, tokenizer.pad_token_id, 15, 8, 98],
[24, 22, 5, 24, 22, 5, 77],
]
A: List[str] = tokenizer.decode(sample_ids[0] )
A: List[str] = tokenizer.batch_decode(SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , batch_tokens[0] )
self.assertEqual(SCREAMING_SNAKE_CASE_ , ['''k s ɾ ɾ l ɭʲ''', '''j ð s j ð s oːɹ'''] )
def _snake_case ( self : Any ) -> Optional[int]:
'''simple docstring'''
A: int = self.tokenizer_class.from_pretrained(
'''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token='''|''' )
tokenizer.add_tokens('''|''' )
A: List[Any] = '''Hello how are you'''
A: Optional[Any] = tokenizer.phonemize(SCREAMING_SNAKE_CASE_ , phonemizer_lang='''en-us''' )
self.assertEqual(SCREAMING_SNAKE_CASE_ , '''h ə l oʊ | h aʊ | ɑːɹ | j uː |''' )
def _snake_case ( self : List[str] ) -> int:
'''simple docstring'''
A: Optional[Any] = self.tokenizer_class.from_pretrained(
'''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token='''|''' )
tokenizer.add_tokens('''|''' )
A: Optional[Any] = '''Hello how are you'''
A: Any = tokenizer.phonemize(SCREAMING_SNAKE_CASE_ , phonemizer_lang='''en-us''' )
self.assertEqual(tokenizer(SCREAMING_SNAKE_CASE_ ).input_ids , tokenizer(SCREAMING_SNAKE_CASE_ , do_phonemize=SCREAMING_SNAKE_CASE_ ).input_ids )
def _snake_case ( self : Dict ) -> Any:
'''simple docstring'''
A: Optional[int] = self.tokenizer_class.from_pretrained(
'''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token='''|''' )
tokenizer.add_tokens('''|''' )
# fmt: off
A: str = [
[11, 5, 15, tokenizer.pad_token_id, tokenizer.word_delimiter_token_id, 15, 8, tokenizer.word_delimiter_token_id, 98],
[tokenizer.word_delimiter_token_id, 24, 22, tokenizer.word_delimiter_token_id, 5, 24, 22, 5, 77],
]
# fmt: on
# decode with word_del_token filter
A: Tuple = tokenizer.decode(sample_ids[0] )
A: Optional[Any] = tokenizer.batch_decode(SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , batch_tokens[0] )
self.assertEqual(SCREAMING_SNAKE_CASE_ , ['''k s ɾ ɾ l ɭʲ''', '''j ð s j ð s oːɹ'''] )
# decode with no word_del_token filter
A: str = tokenizer.decode(sample_ids[0] , filter_word_delimiter_token=SCREAMING_SNAKE_CASE_ )
A: List[Any] = tokenizer.batch_decode(SCREAMING_SNAKE_CASE_ , filter_word_delimiter_token=SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , batch_tokens[0] )
self.assertEqual(SCREAMING_SNAKE_CASE_ , ['''k s ɾ | ɾ l | ɭʲ''', '''| j ð | s j ð s oːɹ'''] )
def _snake_case ( self : int ) -> List[str]:
'''simple docstring'''
A: Dict = self.tokenizer_class.from_pretrained(
'''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token='''|''' )
tokenizer.add_tokens('''|''' )
A: Union[str, Any] = '''Hello how are you'''
A: Tuple = tokenizer.phonemize(SCREAMING_SNAKE_CASE_ , phonemizer_lang='''en-us''' )
A: Any = tokenizer.decode(tokenizer(SCREAMING_SNAKE_CASE_ ).input_ids , filter_word_delimiter_token=SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def _snake_case ( self : List[str] ) -> Any:
'''simple docstring'''
A: Dict = self.tokenizer_class.from_pretrained(
'''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token='''|''' )
tokenizer.add_tokens('''|''' )
A: Any = '''Hello how are you'''
A: List[Any] = tokenizer.phonemize(SCREAMING_SNAKE_CASE_ , phonemizer_lang='''en-us''' )
A: List[Any] = tokenizer.decode(tokenizer(SCREAMING_SNAKE_CASE_ ).input_ids , filter_word_delimiter_token=SCREAMING_SNAKE_CASE_ )
self.assertEqual(''' '''.join([p.strip() for p in phonemes.split(''' |''' )] ).strip() , SCREAMING_SNAKE_CASE_ )
def _snake_case ( self : List[str] ) -> Optional[Any]:
'''simple docstring'''
A: List[str] = self.tokenizer_class.from_pretrained(
'''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token=SCREAMING_SNAKE_CASE_ )
A: List[Any] = '''Hello how are you'''
A: List[str] = tokenizer(SCREAMING_SNAKE_CASE_ , phonemizer_lang='''en-us''' ).input_ids
A: Tuple = tokenizer(SCREAMING_SNAKE_CASE_ , phonemizer_lang='''fr-fr''' ).input_ids
self.assertNotEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
A: Tuple = tokenizer.decode(SCREAMING_SNAKE_CASE_ )
A: Any = tokenizer.decode(SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , '''h ə l oʊ h aʊ ɑːɹ j uː''' )
self.assertEqual(SCREAMING_SNAKE_CASE_ , '''ɛ l o h aʊ a ʁ j u''' )
def _snake_case ( self : str ) -> str:
'''simple docstring'''
A: str = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' )
A: str = '''Hello how Are you'''
A: Union[str, Any] = '''hello how are you'''
A: List[str] = tokenizer(SCREAMING_SNAKE_CASE_ ).input_ids
A: str = tokenizer(SCREAMING_SNAKE_CASE_ ).input_ids
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def _snake_case ( self : int ) -> List[Any]:
'''simple docstring'''
A: Union[str, Any] = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' )
tokenizer.add_tokens(['''!''', '''?'''] )
tokenizer.add_special_tokens({'''cls_token''': '''$$$'''} )
# fmt: off
A: Tuple = [
[11, 5, 15, tokenizer.pad_token_id, 15, 8, 98, 3_92, 3_92, 3_93, 3_92, 3_92, 3_93, 3_94, 3_94],
[24, 22, 5, 24, 22, 5, 77, tokenizer.pad_token_id, 3_94, 3_94],
]
# fmt: on
A: List[Any] = tokenizer.batch_decode(SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , ['''k s ɾ ɾ l ɭʲ!?!? $$$''', '''j ð s j ð s oːɹ $$$'''] )
@staticmethod
    def get_from_offsets(offsets, key):
        retrieved_list = [d[key] for d in offsets]
        return retrieved_list
    def test_offsets(self):
        tokenizer = self.get_tokenizer(word_delimiter_token="|")
tokenizer.add_tokens('''|''' )
# fmt: off
# ksssɾɾ|ɾɾ<pad>ɾɾ|<pad>ɾlll|ɭʲ -> k s ɾ ɾ | ɾ l | ɭʲ"
        sample_ids = [11, 5, 5, 5, 15, 15, tokenizer.pad_token_id, 15, 15, tokenizer.word_delimiter_token_id, tokenizer.pad_token_id, 15, 8, 8, 8, tokenizer.word_delimiter_token_id, 98]
# fmt: on
        outputs = tokenizer.decode(sample_ids, output_char_offsets=True, filter_word_delimiter_token=False)
# check Wav2Vec2CTCTokenizerOutput keys for char
self.assertEqual(len(outputs.keys() ) , 2 )
self.assertTrue('''text''' in outputs )
self.assertTrue('''char_offsets''' in outputs )
        self.assertTrue(isinstance(outputs, Wav2Vec2PhonemeCTCTokenizerOutput))
# check that order of chars is correct and identical for both outputs
self.assertEqual(''' '''.join(self.get_from_offsets(outputs['''char_offsets'''] , '''char''' ) ) , outputs.text )
self.assertListEqual(
self.get_from_offsets(outputs['''char_offsets'''] , '''char''' ) , ['''k''', '''s''', '''ɾ''', '''ɾ''', '''|''', '''ɾ''', '''l''', '''|''', '''ɭʲ'''] )
# check that offsets are actually correct for char
# 0-1 is 11, 1-4 is 5, 4-6 is first 15, 6-7 is <pad> (thus not shown), 7-9 is second 15, 9-10 is word_delimiter_token,
# 10-11 is <pad> (thus not shown), 11-12 is third 15, 12-15 is 8, 15-16 is word_delimiter_token, 16-17 is 98
self.assertListEqual(
self.get_from_offsets(outputs['''char_offsets'''] , '''start_offset''' ) , [0, 1, 4, 7, 9, 11, 12, 15, 16] )
self.assertListEqual(
self.get_from_offsets(outputs['''char_offsets'''] , '''end_offset''' ) , [1, 4, 6, 9, 10, 12, 15, 16, 17] )
    def test_offsets_batch(self):
        tokenizer = self.get_tokenizer(word_delimiter_token="|")

        def check_list_tuples_equal(outputs_batch, outputs_list):
            self.assertTrue(isinstance(outputs_batch, Wav2Vec2PhonemeCTCTokenizerOutput))
            self.assertTrue(isinstance(outputs_list[0], Wav2Vec2PhonemeCTCTokenizerOutput))

            # transform list to ModelOutput
            outputs_batch_2 = Wav2Vec2PhonemeCTCTokenizerOutput(
                {k: [d[k] for d in outputs_list] for k in outputs_list[0]}
            )
            self.assertListEqual(outputs_batch["text"], outputs_batch_2["text"])

            def recursive_check(list_or_dict_1, list_or_dict_2):
                if isinstance(list_or_dict_1, list):
                    [recursive_check(l1, l2) for l1, l2 in zip(list_or_dict_1, list_or_dict_2)]
                self.assertEqual(list_or_dict_1, list_or_dict_2)

            if "char_offsets" in outputs_batch:
                recursive_check(outputs_batch["char_offsets"], outputs_batch_2["char_offsets"])
# fmt: off
        sample_ids = [
[11, 5, 15, tokenizer.pad_token_id, 15, 4, 8, 98, 32, 32, 32, 32, 4, 33, tokenizer.word_delimiter_token_id, 32, 32, 33, 34, 34],
[24, 22, 5, tokenizer.word_delimiter_token_id, tokenizer.word_delimiter_token_id, 24, 22, 22, 22, 4, 5, 77, tokenizer.pad_token_id, 22, 22, 4, 34, 34, 34, 34],
]
# fmt: on
# We assume that `decode` works as expected. All we will check now is
# the output type is correct and the output is identical to `decode`
# char
        outputs = tokenizer.batch_decode(sample_ids, output_char_offsets=True)
        outputs_list = [tokenizer.decode(ids, output_char_offsets=True) for ids in sample_ids]
        check_list_tuples_equal(outputs, outputs_list)
@unittest.skip('''Wav2Vec2PhonemeTokenizer always lower cases letters to correctly map to phonemes''' )
def _snake_case ( self : int ) -> int:
'''simple docstring'''
pass
@unittest.skip('''Wav2Vec2PhonemeTokenizer always puts spaces between phonemes''' )
def _snake_case ( self : str ) -> Any:
'''simple docstring'''
pass
@unittest.skip('''encodes to text to ids, but decodes ids to phonemes -> not possible to have internal consistency''' )
def _snake_case ( self : List[str] ) -> List[str]:
'''simple docstring'''
pass
@unittest.skip('''Wav2Vec2PhonemeModel has no max model length => no testing''' )
def _snake_case ( self : Dict ) -> List[Any]:
'''simple docstring'''
pass
def _snake_case ( self : Tuple ) -> Any:
'''simple docstring'''
A: Any = self.get_tokenizers(do_lower_case=SCREAMING_SNAKE_CASE_ )
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
A: str = tokenizer.vocab_size
A: str = len(SCREAMING_SNAKE_CASE_ )
self.assertNotEqual(SCREAMING_SNAKE_CASE_ , 0 )
# We usually have added tokens from the start in tests because our vocab fixtures are
# smaller than the original vocabs - let's not assert this
# self.assertEqual(vocab_size, all_size)
A: List[Any] = ['''aaaaa bbbbbb''', '''cccccccccdddddddd''']
A: List[Any] = tokenizer.add_tokens(SCREAMING_SNAKE_CASE_ )
A: Optional[Any] = tokenizer.vocab_size
A: Union[str, Any] = len(SCREAMING_SNAKE_CASE_ )
self.assertNotEqual(SCREAMING_SNAKE_CASE_ , 0 )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , len(SCREAMING_SNAKE_CASE_ ) )
self.assertEqual(SCREAMING_SNAKE_CASE_ , all_size + len(SCREAMING_SNAKE_CASE_ ) )
A: Any = tokenizer.encode('''aaaaa bbbbbb low cccccccccdddddddd l''' , add_special_tokens=SCREAMING_SNAKE_CASE_ )
self.assertGreaterEqual(len(SCREAMING_SNAKE_CASE_ ) , 4 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
A: str = {'''eos_token''': '''>>>>|||<||<<|<<''', '''pad_token''': '''<<<<<|||>|>>>>|>'''}
A: int = tokenizer.add_special_tokens(SCREAMING_SNAKE_CASE_ )
A: Optional[Any] = tokenizer.vocab_size
A: Optional[Any] = len(SCREAMING_SNAKE_CASE_ )
self.assertNotEqual(SCREAMING_SNAKE_CASE_ , 0 )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , len(SCREAMING_SNAKE_CASE_ ) )
self.assertEqual(SCREAMING_SNAKE_CASE_ , all_size_a + len(SCREAMING_SNAKE_CASE_ ) )
A: int = tokenizer.encode(
'''>>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l''' , add_special_tokens=SCREAMING_SNAKE_CASE_ )
self.assertGreaterEqual(len(SCREAMING_SNAKE_CASE_ ) , 6 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[0] , tokens[1] )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokens[-4] )
self.assertEqual(tokens[0] , tokenizer.eos_token_id )
self.assertEqual(tokens[-3] , tokenizer.pad_token_id )
@unittest.skip('''The tokenizer shouldn\'t be used to encode input IDs (except for labels), only to decode.''' )
def _snake_case ( self : List[Any] ) -> Optional[Any]:
'''simple docstring'''
pass
@unittest.skip('''The tokenizer shouldn\'t be used to encode input IDs (except for labels), only to decode.''' )
def _snake_case ( self : Tuple ) -> Optional[Any]:
'''simple docstring'''
pass
def _snake_case ( self : str ) -> Tuple:
'''simple docstring'''
A: List[Any] = self.get_tokenizers(fast=SCREAMING_SNAKE_CASE_ , do_lower_case=SCREAMING_SNAKE_CASE_ )
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
A: Union[str, Any] = ['''ð''', '''ɪ''', '''s''', '''ɪ''', '''z''', '''ɐ''', '''t''', '''ɛ''', '''k''', '''s''', '''t''']
A: Union[str, Any] = tokenizer.convert_tokens_to_string(SCREAMING_SNAKE_CASE_ )
self.assertIsInstance(output['''text'''] , SCREAMING_SNAKE_CASE_ )
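# For reference (illustrative, added for clarity): with output_char_offsets=True
# the tokenizer returns, alongside "text", a list of dicts such as
#     {"char": "k", "start_offset": 0, "end_offset": 1}
# whose offsets index into the raw CTC frame sequence, which is exactly what the
# offset tests above exercise.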
| 319 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_import_structure = {'tokenization_byt5': ['ByT5Tokenizer']}

if TYPE_CHECKING:
    from .tokenization_byt5 import ByT5Tokenizer
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
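# The _LazyModule indirection defers importing `tokenization_byt5` until an
# attribute is first accessed, e.g. (illustrative):
#
#     from transformers.models.byt5 import ByT5Tokenizer  # triggers the lazy load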
| 63 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
logger = logging.get_logger(__name__)


class BeitFeatureExtractor(BeitImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use BeitImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 319 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MARKUPLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/markuplm-base": "https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json",
    "microsoft/markuplm-large": "https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json",
}


class MarkupLMConfig(PretrainedConfig):
    model_type = "markuplm"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        bos_token_id=0,
        eos_token_id=2,
        max_xpath_tag_unit_embeddings=256,
        max_xpath_subs_unit_embeddings=1024,
        tag_pad_id=216,
        subs_pad_id=1001,
        xpath_unit_hidden_size=32,
        max_depth=50,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        # additional properties
        self.max_depth = max_depth
        self.max_xpath_tag_unit_embeddings = max_xpath_tag_unit_embeddings
        self.max_xpath_subs_unit_embeddings = max_xpath_subs_unit_embeddings
        self.tag_pad_id = tag_pad_id
        self.subs_pad_id = subs_pad_id
        self.xpath_unit_hidden_size = xpath_unit_hidden_size
| 64 |
'''simple docstring'''
import os
import pytest
from transformers.dynamic_module_utils import get_imports
TOP_LEVEL_IMPORT = '''
import os
'''
IMPORT_IN_FUNCTION = '''
def foo():
import os
return False
'''
DEEPLY_NESTED_IMPORT = '''
def foo():
def bar():
if True:
import os
return False
return bar()
'''
TOP_LEVEL_TRY_IMPORT = '''
import os
try:
import bar
except ImportError:
raise ValueError()
'''
TRY_IMPORT_IN_FUNCTION = '''
import os
def foo():
try:
import bar
except ImportError:
raise ValueError()
'''
MULTIPLE_EXCEPTS_IMPORT = '''
import os
try:
import bar
except (ImportError, AttributeError):
raise ValueError()
'''
EXCEPT_AS_IMPORT = '''
import os
try:
import bar
except ImportError as e:
raise ValueError()
'''
GENERIC_EXCEPT_IMPORT = '''
import os
try:
import bar
except:
raise ValueError()
'''
MULTILINE_TRY_IMPORT = '''
import os
try:
import bar
import baz
except ImportError:
raise ValueError()
'''
MULTILINE_BOTH_IMPORT = '''
import os
try:
import bar
import baz
except ImportError:
x = 1
raise ValueError()
'''
CASES = [
TOP_LEVEL_IMPORT,
IMPORT_IN_FUNCTION,
DEEPLY_NESTED_IMPORT,
TOP_LEVEL_TRY_IMPORT,
GENERIC_EXCEPT_IMPORT,
MULTILINE_TRY_IMPORT,
MULTILINE_BOTH_IMPORT,
MULTIPLE_EXCEPTS_IMPORT,
EXCEPT_AS_IMPORT,
TRY_IMPORT_IN_FUNCTION,
]
@pytest.mark.parametrize("case", CASES)
def test_import_parsing(tmp_path, case):
    tmp_file_path = os.path.join(tmp_path, "test_file.py")
    with open(tmp_file_path, "w") as _tmp_file:
        _tmp_file.write(case)

    parsed_imports = get_imports(tmp_file_path)
    assert parsed_imports == ["os"]
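# Why every case above parses to ["os"] (illustrative note): get_imports
# collects import statements wherever they appear, but skips anything inside a
# try/except block, since those are treated as optional dependencies. Only the
# `import os` statements survive the filter in each fixture; a bare top-level
# `import numpy` would yield ["os", "numpy"] instead.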
| 319 | 0 |
def jaro_winkler(str1: str, str2: str) -> float:
    """
    Jaro-Winkler similarity between two strings (1.0 means identical).

    >>> jaro_winkler("martha", "marhta")
    0.9611111111111111
    >>> jaro_winkler("test", "test")
    1.0
    >>> jaro_winkler("hello", "world")
    0.4666666666666666
    """

    def get_matched_characters(_str1: str, _str2: str) -> str:
        matched = []
        limit = min(len(_str1), len(_str2)) // 2
        for i, l in enumerate(_str1):
            left = int(max(0, i - limit))
            right = int(min(i + limit + 1, len(_str2)))
            if l in _str2[left:right]:
                matched.append(l)
                _str2 = f"{_str2[0:_str2.index(l)]} {_str2[_str2.index(l) + 1:]}"
        return "".join(matched)

    # matching characters
    matching_1 = get_matched_characters(str1, str2)
    matching_2 = get_matched_characters(str2, str1)
    match_count = len(matching_1)

    # transposition
    transpositions = (
        len([(c1, c2) for c1, c2 in zip(matching_1, matching_2) if c1 != c2]) // 2
    )

    if not match_count:
        jaro = 0.0
    else:
        jaro = (
            1
            / 3
            * (
                match_count / len(str1)
                + match_count / len(str2)
                + (match_count - transpositions) / match_count
            )
        )

    # common prefix up to 4 characters
    prefix_len = 0
    for c1, c2 in zip(str1[:4], str2[:4]):
        if c1 == c2:
            prefix_len += 1
        else:
            break

    return jaro + 0.1 * prefix_len * (1 - jaro)
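# Worked example: for ("hello", "world") only "l" matches inside the search
# window (limit = min(5, 5) // 2 = 2), so jaro = (1/5 + 1/5 + 1/1) / 3 ~= 0.4667,
# and with no shared prefix the Winkler bonus adds nothing.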
if __name__ == "__main__":
import doctest
doctest.testmod()
print(jaro_winkler('hello', 'world'))
| 65 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
BertTokenizer,
ViltConfig,
ViltForImageAndTextRetrieval,
ViltForImagesAndTextClassification,
ViltForMaskedLM,
ViltForQuestionAnswering,
ViltImageProcessor,
ViltProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(config, vqa_model=False, nlvr_model=False, irtr_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"""transformer.blocks.{i}.norm1.weight""", F"""vilt.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""transformer.blocks.{i}.norm1.bias""", F"""vilt.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append(
(F"""transformer.blocks.{i}.attn.proj.weight""", F"""vilt.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append(
(F"""transformer.blocks.{i}.attn.proj.bias""", F"""vilt.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((F"""transformer.blocks.{i}.norm2.weight""", F"""vilt.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""transformer.blocks.{i}.norm2.bias""", F"""vilt.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append(
(F"""transformer.blocks.{i}.mlp.fc1.weight""", F"""vilt.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((F"""transformer.blocks.{i}.mlp.fc1.bias""", F"""vilt.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((F"""transformer.blocks.{i}.mlp.fc2.weight""", F"""vilt.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""transformer.blocks.{i}.mlp.fc2.bias""", F"""vilt.encoder.layer.{i}.output.dense.bias""") )
# embeddings
rename_keys.extend(
[
# text embeddings
('''text_embeddings.word_embeddings.weight''', '''vilt.embeddings.text_embeddings.word_embeddings.weight'''),
(
'''text_embeddings.position_embeddings.weight''',
'''vilt.embeddings.text_embeddings.position_embeddings.weight''',
),
('''text_embeddings.position_ids''', '''vilt.embeddings.text_embeddings.position_ids'''),
(
'''text_embeddings.token_type_embeddings.weight''',
'''vilt.embeddings.text_embeddings.token_type_embeddings.weight''',
),
('''text_embeddings.LayerNorm.weight''', '''vilt.embeddings.text_embeddings.LayerNorm.weight'''),
('''text_embeddings.LayerNorm.bias''', '''vilt.embeddings.text_embeddings.LayerNorm.bias'''),
# patch embeddings
('''transformer.cls_token''', '''vilt.embeddings.cls_token'''),
('''transformer.patch_embed.proj.weight''', '''vilt.embeddings.patch_embeddings.projection.weight'''),
('''transformer.patch_embed.proj.bias''', '''vilt.embeddings.patch_embeddings.projection.bias'''),
('''transformer.pos_embed''', '''vilt.embeddings.position_embeddings'''),
# token type embeddings
('''token_type_embeddings.weight''', '''vilt.embeddings.token_type_embeddings.weight'''),
] )
# final layernorm + pooler
rename_keys.extend(
[
('''transformer.norm.weight''', '''vilt.layernorm.weight'''),
('''transformer.norm.bias''', '''vilt.layernorm.bias'''),
('''pooler.dense.weight''', '''vilt.pooler.dense.weight'''),
('''pooler.dense.bias''', '''vilt.pooler.dense.bias'''),
] )
# classifier head(s)
if vqa_model:
# classification head
rename_keys.extend(
[
('''vqa_classifier.0.weight''', '''classifier.0.weight'''),
('''vqa_classifier.0.bias''', '''classifier.0.bias'''),
('''vqa_classifier.1.weight''', '''classifier.1.weight'''),
('''vqa_classifier.1.bias''', '''classifier.1.bias'''),
('''vqa_classifier.3.weight''', '''classifier.3.weight'''),
('''vqa_classifier.3.bias''', '''classifier.3.bias'''),
] )
elif nlvr_model:
# classification head
rename_keys.extend(
[
('''nlvr2_classifier.0.weight''', '''classifier.0.weight'''),
('''nlvr2_classifier.0.bias''', '''classifier.0.bias'''),
('''nlvr2_classifier.1.weight''', '''classifier.1.weight'''),
('''nlvr2_classifier.1.bias''', '''classifier.1.bias'''),
('''nlvr2_classifier.3.weight''', '''classifier.3.weight'''),
('''nlvr2_classifier.3.bias''', '''classifier.3.bias'''),
] )
else:
pass
return rename_keys
def read_in_q_k_v(state_dict, config):
    for i in range(config.num_hidden_layers):
        prefix = "vilt."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"transformer.blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"transformer.blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
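# Illustration of the split above (a toy-sized sketch, not part of the
# conversion): with hidden_size = 4, the fused qkv weight has shape (12, 4) and
# the three hidden_size-sized row blocks are the query, key and value matrices:
#
#     qkv = torch.arange(48.0).reshape(12, 4)
#     q, k, v = qkv[:4, :], qkv[4:8, :], qkv[-4:, :]  # each of shape (4, 4)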
def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
@torch.no_grad()
def convert_vilt_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    """
    Copy/paste/tweak the original ViLT weights into our ViLT structure.
    """
    # define configuration and initialize HuggingFace model
    config = ViltConfig(image_size=384, patch_size=32, tie_word_embeddings=False)
    mlm_model = False
    vqa_model = False
    nlvr_model = False
    irtr_model = False
    if "vqa" in checkpoint_url:
        vqa_model = True
        config.num_labels = 3129
        repo_id = "huggingface/label-files"
        filename = "vqa2-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        model = ViltForQuestionAnswering(config)
    elif "nlvr" in checkpoint_url:
        nlvr_model = True
        config.num_labels = 2
        config.id2label = {0: "False", 1: "True"}
        config.label2id = {v: k for k, v in config.id2label.items()}
        config.modality_type_vocab_size = 3
        model = ViltForImagesAndTextClassification(config)
    elif "irtr" in checkpoint_url:
        irtr_model = True
        model = ViltForImageAndTextRetrieval(config)
    elif "mlm_itm" in checkpoint_url:
        mlm_model = True
        model = ViltForMaskedLM(config)
    else:
        raise ValueError("Unknown model type")

    # load state_dict of original model, remove and rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["state_dict"]
    rename_keys = create_rename_keys(config, vqa_model, nlvr_model, irtr_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config)
    if mlm_model or irtr_model:
        ignore_keys = ["itm_score.fc.weight", "itm_score.fc.bias"]
        for k in ignore_keys:
            state_dict.pop(k, None)

    # load state dict into HuggingFace model
    model.eval()
    if mlm_model:
        missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
        assert missing_keys == ["mlm_score.decoder.bias"]
    else:
        model.load_state_dict(state_dict)

    # Define processor
    image_processor = ViltImageProcessor(size=384)
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
    processor = ViltProcessor(image_processor, tokenizer)

    # Forward pass on example inputs (image + text)
    if nlvr_model:
        image1 = Image.open(requests.get("https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg", stream=True).raw)
        image2 = Image.open(requests.get("https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg", stream=True).raw)
        text = (
            "The left image contains twice the number of dogs as the right image, and at least two dogs in total are"
            " standing."
        )
        encoding_1 = processor(image1, text, return_tensors="pt")
        encoding_2 = processor(image2, text, return_tensors="pt")
        outputs = model(
            input_ids=encoding_1.input_ids,
            pixel_values=encoding_1.pixel_values,
            pixel_values_2=encoding_2.pixel_values,
        )
    else:
        image = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw)
        if mlm_model:
            text = "a bunch of [MASK] laying on a [MASK]."
        else:
            text = "How many cats are there?"
        encoding = processor(image, text, return_tensors="pt")
        outputs = model(**encoding)

    # Verify outputs
    if mlm_model:
        expected_shape = torch.Size([1, 11, 30522])
        expected_slice = torch.tensor([-12.5061, -12.5123, -12.5174])
        assert outputs.logits.shape == expected_shape
        assert torch.allclose(outputs.logits[0, 0, :3], expected_slice, atol=1e-4)

        # verify masked token prediction equals "cats"
        predicted_id = outputs.logits[0, 4, :].argmax(-1).item()
        assert tokenizer.decode([predicted_id]) == "cats"
    elif vqa_model:
        expected_shape = torch.Size([1, 3129])
        expected_slice = torch.tensor([-15.9495, -18.1472, -10.3041])
        assert outputs.logits.shape == expected_shape
        assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)

        # verify vqa prediction equals "2"
        predicted_idx = outputs.logits.argmax(-1).item()
        assert model.config.id2label[predicted_idx] == "2"
    elif nlvr_model:
        expected_shape = torch.Size([1, 2])
        expected_slice = torch.tensor([-2.8721, 2.1291])
        assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)
        assert outputs.logits.shape == expected_shape

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model and processor to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt''',
type=str,
help='''URL of the checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
    args = parser.parse_args()
convert_vilt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
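# Example invocation, assuming this script is saved as convert_vilt.py (the
# output directory name below is arbitrary):
#
#     python convert_vilt.py \
#         --checkpoint_url https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt \
#         --pytorch_dump_folder_path ./vilt-b32-mlm-itm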
| 319 | 0 |
"""simple docstring"""
import gc
import math
import unittest
import torch
from diffusers import UNet2DModel
from diffusers.utils import floats_tensor, logging, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
logger = logging.get_logger(__name__)
enable_full_determinism()
class Unet2DModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = UNet2DModel
    main_input_name = "sample"

    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        noise = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor([10]).to(torch_device)

        return {"sample": noise, "timestep": time_step}

    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": (32, 64),
            "down_block_types": ("DownBlock2D", "AttnDownBlock2D"),
            "up_block_types": ("AttnUpBlock2D", "UpBlock2D"),
            "attention_head_dim": 3,
            "out_channels": 3,
            "in_channels": 3,
            "layers_per_block": 2,
            "sample_size": 32,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
class UNetLDMModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = UNet2DModel
    main_input_name = "sample"

    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 4
        sizes = (32, 32)

        noise = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor([10]).to(torch_device)

        return {"sample": noise, "timestep": time_step}

    @property
    def input_shape(self):
        return (4, 32, 32)

    @property
    def output_shape(self):
        return (4, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "sample_size": 32,
            "in_channels": 4,
            "out_channels": 4,
            "layers_per_block": 2,
            "block_out_channels": (32, 64),
            "attention_head_dim": 32,
            "down_block_types": ("DownBlock2D", "DownBlock2D"),
            "up_block_types": ("UpBlock2D", "UpBlock2D"),
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
    def test_from_pretrained_hub(self):
        model, loading_info = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update", output_loading_info=True)

        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        image = model(**self.dummy_input).sample

        assert image is not None, "Make sure output is not None"

    @unittest.skipIf(torch_device != "cuda", "This test is supposed to run on GPU")
    def test_from_pretrained_accelerate(self):
        model, _ = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update", output_loading_info=True)
        model.to(torch_device)
        image = model(**self.dummy_input).sample

        assert image is not None, "Make sure output is not None"

    @unittest.skipIf(torch_device != "cuda", "This test is supposed to run on GPU")
    def test_from_pretrained_accelerate_wont_change_results(self):
        # by default model loading will use accelerate as `low_cpu_mem_usage=True`
        model_accelerate, _ = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update", output_loading_info=True)
        model_accelerate.to(torch_device)
        model_accelerate.eval()

        noise = torch.randn(
            1,
            model_accelerate.config.in_channels,
            model_accelerate.config.sample_size,
            model_accelerate.config.sample_size,
            generator=torch.manual_seed(0),
        )
        noise = noise.to(torch_device)
        time_step = torch.tensor([10] * noise.shape[0]).to(torch_device)

        arr_accelerate = model_accelerate(noise, time_step)["sample"]

        # two models don't need to stay in the device at the same time
        del model_accelerate
        torch.cuda.empty_cache()
        gc.collect()

        model_normal_load, _ = UNet2DModel.from_pretrained(
            "fusing/unet-ldm-dummy-update", output_loading_info=True, low_cpu_mem_usage=False
        )
        model_normal_load.to(torch_device)
        model_normal_load.eval()
        arr_normal_load = model_normal_load(noise, time_step)["sample"]

        assert torch_all_close(arr_accelerate, arr_normal_load, rtol=1e-3)

    def test_output_pretrained(self):
        model = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update")
        model.eval()
        model.to(torch_device)

        noise = torch.randn(
            1,
            model.config.in_channels,
            model.config.sample_size,
            model.config.sample_size,
            generator=torch.manual_seed(0),
        )
        noise = noise.to(torch_device)
        time_step = torch.tensor([10] * noise.shape[0]).to(torch_device)

        with torch.no_grad():
            output = model(noise, time_step).sample

        output_slice = output[0, -1, -3:, -3:].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-13.3258, -20.1100, -15.9873, -17.6617, -23.0596, -17.9419, -13.3675, -16.1889, -12.3800])
        # fmt: on

        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-3))
class NCSNppModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = UNet2DModel
    main_input_name = "sample"

    @property
    def dummy_input(self, sizes=(32, 32)):
        batch_size = 4
        num_channels = 3

        noise = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor(batch_size * [10]).to(dtype=torch.int32, device=torch_device)

        return {"sample": noise, "timestep": time_step}

    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64, 64, 64],
            "in_channels": 3,
            "layers_per_block": 1,
            "out_channels": 3,
            "time_embedding_type": "fourier",
            "norm_eps": 1e-6,
            "mid_block_scale_factor": math.sqrt(2.0),
            "norm_num_groups": None,
            "down_block_types": [
                "SkipDownBlock2D",
                "AttnSkipDownBlock2D",
                "SkipDownBlock2D",
                "SkipDownBlock2D",
            ],
            "up_block_types": [
                "SkipUpBlock2D",
                "SkipUpBlock2D",
                "AttnSkipUpBlock2D",
                "SkipUpBlock2D",
            ],
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
    @slow
    def test_from_pretrained_hub(self):
        model, loading_info = UNet2DModel.from_pretrained("google/ncsnpp-celebahq-256", output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        inputs = self.dummy_input
        noise = floats_tensor((4, 3) + (256, 256)).to(torch_device)
        inputs["sample"] = noise
        image = model(**inputs)

        assert image is not None, "Make sure output is not None"

    @slow
    def test_output_pretrained_ve_mid(self):
        model = UNet2DModel.from_pretrained("google/ncsnpp-celebahq-256")
        model.to(torch_device)

        batch_size = 4
        num_channels = 3
        sizes = (256, 256)

        noise = torch.ones((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor(batch_size * [1e-4]).to(torch_device)

        with torch.no_grad():
            output = model(noise, time_step).sample

        output_slice = output[0, -3:, -3:, -1].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-4842.8691, -6499.6631, -3800.1953, -7978.2686, -10980.7129, -20028.8535, 8148.2822, 2342.2905, 567.7608])
        # fmt: on

        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))

    def test_output_pretrained_ve_large(self):
        model = UNet2DModel.from_pretrained("fusing/ncsnpp-ffhq-ve-dummy-update")
        model.to(torch_device)

        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        noise = torch.ones((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor(batch_size * [1e-4]).to(torch_device)

        with torch.no_grad():
            output = model(noise, time_step).sample

        output_slice = output[0, -3:, -3:, -1].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-0.0325, -0.0900, -0.0869, -0.0332, -0.0725, -0.0270, -0.0101, 0.0227, 0.0256])
        # fmt: on

        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))

    def test_forward_with_norm_groups(self):
        # not required for this model
        pass
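# To run a single test from this module, assuming the file is saved as
# test_models_unet_2d.py (the @slow tests are typically gated behind an
# environment flag such as RUN_SLOW=1):
#
#     pytest test_models_unet_2d.py -k "test_output_pretrained" -s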
| 66 |
'''simple docstring'''
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

model_classes = {
    "b0": efficientnet.EfficientNetB0,
    "b1": efficientnet.EfficientNetB1,
    "b2": efficientnet.EfficientNetB2,
    "b3": efficientnet.EfficientNetB3,
    "b4": efficientnet.EfficientNetB4,
    "b5": efficientnet.EfficientNetB5,
    "b6": efficientnet.EfficientNetB6,
    "b7": efficientnet.EfficientNetB7,
}

CONFIG_MAP = {
'''b0''': {
'''hidden_dim''': 1280,
'''width_coef''': 1.0,
'''depth_coef''': 1.0,
'''image_size''': 224,
'''dropout_rate''': 0.2,
'''dw_padding''': [],
},
'''b1''': {
'''hidden_dim''': 1280,
'''width_coef''': 1.0,
'''depth_coef''': 1.1,
'''image_size''': 240,
'''dropout_rate''': 0.2,
'''dw_padding''': [16],
},
'''b2''': {
'''hidden_dim''': 1408,
'''width_coef''': 1.1,
'''depth_coef''': 1.2,
'''image_size''': 260,
'''dropout_rate''': 0.3,
'''dw_padding''': [5, 8, 16],
},
'''b3''': {
'''hidden_dim''': 1536,
'''width_coef''': 1.2,
'''depth_coef''': 1.4,
'''image_size''': 300,
'''dropout_rate''': 0.3,
'''dw_padding''': [5, 18],
},
'''b4''': {
'''hidden_dim''': 1792,
'''width_coef''': 1.4,
'''depth_coef''': 1.8,
'''image_size''': 380,
'''dropout_rate''': 0.4,
'''dw_padding''': [6],
},
'''b5''': {
'''hidden_dim''': 2048,
'''width_coef''': 1.6,
'''depth_coef''': 2.2,
'''image_size''': 456,
'''dropout_rate''': 0.4,
'''dw_padding''': [13, 27],
},
'''b6''': {
'''hidden_dim''': 2304,
'''width_coef''': 1.8,
'''depth_coef''': 2.6,
'''image_size''': 528,
'''dropout_rate''': 0.5,
'''dw_padding''': [31],
},
'''b7''': {
'''hidden_dim''': 2560,
'''width_coef''': 2.0,
'''depth_coef''': 3.1,
'''image_size''': 600,
'''dropout_rate''': 0.5,
'''dw_padding''': [18],
},
}
def get_efficientnet_config(model_name):
    config = EfficientNetConfig()
    config.hidden_dim = CONFIG_MAP[model_name]["hidden_dim"]
    config.width_coefficient = CONFIG_MAP[model_name]["width_coef"]
    config.depth_coefficient = CONFIG_MAP[model_name]["depth_coef"]
    config.image_size = CONFIG_MAP[model_name]["image_size"]
    config.dropout_rate = CONFIG_MAP[model_name]["dropout_rate"]
    config.depthwise_padding = CONFIG_MAP[model_name]["dw_padding"]

    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    config.num_labels = 1000
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}

    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
def convert_image_processor(model_name):
    size = CONFIG_MAP[model_name]["image_size"]
    preprocessor = EfficientNetImageProcessor(
        size={"height": size, "width": size},
        image_mean=[0.485, 0.456, 0.406],
        image_std=[0.47853944, 0.4732864, 0.47434163],
        do_center_crop=False,
    )
    return preprocessor
def rename_keys(original_param_names):
    block_names = [v.split("_")[0].split("block")[1] for v in original_param_names if v.startswith("block")]
    block_names = sorted(set(block_names))
    num_blocks = len(block_names)
    block_name_mapping = {b: str(i) for b, i in zip(block_names, range(num_blocks))}

    rename_keys = []
rename_keys.append(('''stem_conv/kernel:0''', '''embeddings.convolution.weight''') )
rename_keys.append(('''stem_bn/gamma:0''', '''embeddings.batchnorm.weight''') )
rename_keys.append(('''stem_bn/beta:0''', '''embeddings.batchnorm.bias''') )
rename_keys.append(('''stem_bn/moving_mean:0''', '''embeddings.batchnorm.running_mean''') )
rename_keys.append(('''stem_bn/moving_variance:0''', '''embeddings.batchnorm.running_var''') )
    for b in block_names:
        hf_b = block_name_mapping[b]
rename_keys.append((F"""block{b}_expand_conv/kernel:0""", F"""encoder.blocks.{hf_b}.expansion.expand_conv.weight""") )
rename_keys.append((F"""block{b}_expand_bn/gamma:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.weight""") )
rename_keys.append((F"""block{b}_expand_bn/beta:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.bias""") )
rename_keys.append(
(F"""block{b}_expand_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.running_mean""") )
rename_keys.append(
(F"""block{b}_expand_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.running_var""") )
rename_keys.append(
(F"""block{b}_dwconv/depthwise_kernel:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight""") )
rename_keys.append((F"""block{b}_bn/gamma:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight""") )
rename_keys.append((F"""block{b}_bn/beta:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias""") )
rename_keys.append(
(F"""block{b}_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean""") )
rename_keys.append(
(F"""block{b}_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var""") )
rename_keys.append((F"""block{b}_se_reduce/kernel:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.reduce.weight""") )
rename_keys.append((F"""block{b}_se_reduce/bias:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.reduce.bias""") )
rename_keys.append((F"""block{b}_se_expand/kernel:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.expand.weight""") )
rename_keys.append((F"""block{b}_se_expand/bias:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.expand.bias""") )
rename_keys.append(
(F"""block{b}_project_conv/kernel:0""", F"""encoder.blocks.{hf_b}.projection.project_conv.weight""") )
rename_keys.append((F"""block{b}_project_bn/gamma:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.weight""") )
rename_keys.append((F"""block{b}_project_bn/beta:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.bias""") )
rename_keys.append(
(F"""block{b}_project_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.running_mean""") )
rename_keys.append(
(F"""block{b}_project_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.running_var""") )
rename_keys.append(('''top_conv/kernel:0''', '''encoder.top_conv.weight''') )
rename_keys.append(('''top_bn/gamma:0''', '''encoder.top_bn.weight''') )
rename_keys.append(('''top_bn/beta:0''', '''encoder.top_bn.bias''') )
rename_keys.append(('''top_bn/moving_mean:0''', '''encoder.top_bn.running_mean''') )
rename_keys.append(('''top_bn/moving_variance:0''', '''encoder.top_bn.running_var''') )
    key_mapping = {}
    for item in rename_keys:
        if item[0] in original_param_names:
            key_mapping[item[0]] = "efficientnet." + item[1]

    key_mapping["predictions/kernel:0"] = "classifier.weight"
    key_mapping["predictions/bias:0"] = "classifier.bias"
    return key_mapping
def replace_params(hf_params, tf_params, key_mapping):
    for key, value in tf_params.items():
        if "normalization" in key:
            continue

        hf_key = key_mapping[key]
        if "_conv" in key and "kernel" in key:
            new_hf_value = torch.from_numpy(value).permute(3, 2, 0, 1)
        elif "depthwise_kernel" in key:
            new_hf_value = torch.from_numpy(value).permute(2, 3, 0, 1)
        elif "kernel" in key:
            new_hf_value = torch.from_numpy(np.transpose(value))
        else:
            new_hf_value = torch.from_numpy(value)

        # Replace HF parameters with original TF model parameters
        assert hf_params[hf_key].shape == new_hf_value.shape
        hf_params[hf_key].copy_(new_hf_value)
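# Note on the permutes above: Keras stores conv kernels as (H, W, in_ch, out_ch)
# and depthwise kernels as (H, W, ch, multiplier), while PyTorch expects
# (out_ch, in_ch, H, W) -- hence permute(3, 2, 0, 1) and permute(2, 3, 0, 1).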
@torch.no_grad()
def convert_efficientnet_checkpoint(model_name, pytorch_dump_folder_path, save_model, push_to_hub):
    """
    Copy/paste/tweak the original EfficientNet weights into our EfficientNet structure.
    """
    # Load the original Keras model
    original_model = model_classes[model_name](
        include_top=True,
        weights="imagenet",
        input_tensor=None,
        input_shape=None,
        pooling=None,
        classes=1000,
        classifier_activation="softmax",
    )

    tf_params = original_model.trainable_variables
    tf_non_train_params = original_model.non_trainable_variables
    tf_params = {param.name: param.numpy() for param in tf_params}
    for param in tf_non_train_params:
        tf_params[param.name] = param.numpy()
    tf_param_names = list(tf_params.keys())

    # Load HuggingFace model
    config = get_efficientnet_config(model_name)
    hf_model = EfficientNetForImageClassification(config).eval()
    hf_params = hf_model.state_dict()

    # Create src-to-dst parameter name mapping dictionary
    print("Converting parameters...")
    key_mapping = rename_keys(tf_param_names)
    replace_params(hf_params, tf_params, key_mapping)

    # Initialize preprocessor and preprocess input image
    preprocessor = convert_image_processor(model_name)
    inputs = preprocessor(images=prepare_img(), return_tensors="pt")

    # HF model inference
    hf_model.eval()
    with torch.no_grad():
        outputs = hf_model(**inputs)
    hf_logits = outputs.logits.detach().numpy()

    # Original model inference
    original_model.trainable = False
    image_size = CONFIG_MAP[model_name]["image_size"]
    img = prepare_img().resize((image_size, image_size), resample=PIL.Image.NEAREST)
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    original_logits = original_model.predict(x)

    # Check whether original and HF model outputs match -> np.allclose
    assert np.allclose(original_logits, hf_logits, atol=1e-3), "The predicted logits are not the same."
    print("Model outputs match!")

    if save_model:
        # Create folder to save model
        if not os.path.isdir(pytorch_dump_folder_path):
            os.mkdir(pytorch_dump_folder_path)
        # Save converted model and image processor
        hf_model.save_pretrained(pytorch_dump_folder_path)
        preprocessor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Push model and image processor to hub
        print(f"Pushing converted {model_name} to the hub...")
        model_name = f"efficientnet-{model_name}"
        preprocessor.push_to_hub(model_name)
        hf_model.push_to_hub(model_name)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''b0''',
type=str,
help='''Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''hf_model''',
type=str,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument('''--save_model''', action='''store_true''', help='''Save model to local''')
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Push model and image processor to the hub''')
    args = parser.parse_args()
convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
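# Example invocation, assuming this script is saved as convert_efficientnet.py:
#
#     python convert_efficientnet.py --model_name b0 \
#         --pytorch_dump_folder_path ./efficientnet-b0 --save_model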
| 319 | 0 |
'''simple docstring'''
import random
import timeit
from functools import wraps
from typing import Callable, Optional
from ..configuration_utils import PretrainedConfig
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import is_py3nvml_available, is_tf_available, logging
from .benchmark_utils import (
Benchmark,
Memory,
MemorySummary,
measure_peak_memory_cpu,
start_memory_tracing,
stop_memory_tracing,
)
if is_tf_available():
import tensorflow as tf
from tensorflow.python.framework.errors_impl import ResourceExhaustedError
from .benchmark_args_tf import TensorFlowBenchmarkArguments
if is_py3nvml_available():
    import py3nvml.py3nvml as nvml
logger = logging.get_logger(__name__)
def run_with_tf_optimizations(do_eager_mode: bool, use_xla: bool):
    def run_func(func):
        @wraps(func)
        def run_in_eager_mode(*args, **kwargs):
            return func(*args, **kwargs)

        @wraps(func)
        @tf.function(experimental_compile=use_xla)
        def run_in_graph_mode(*args, **kwargs):
            return func(*args, **kwargs)

        if do_eager_mode is True:
            if use_xla is not False:
                raise ValueError(
                    "Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`."
                )
            return run_in_eager_mode
        else:
            return run_in_graph_mode

    return run_func


def random_input_ids(batch_size: int, sequence_length: int, vocab_size: int) -> ["tf.Tensor"]:
    rng = random.Random()
    values = [rng.randint(0, vocab_size - 1) for i in range(batch_size * sequence_length)]
    return tf.constant(values, shape=(batch_size, sequence_length), dtype=tf.int32)
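# A minimal sketch of how the decorator factory above is meant to be used
# (standalone illustration, not part of the benchmark class; `model` and
# `input_ids` are assumed to exist):
#
#     @run_with_tf_optimizations(do_eager_mode=False, use_xla=True)
#     def forward():
#         return model(input_ids, training=False)
#
# With do_eager_mode=False the call is wrapped in a tf.function (optionally
# XLA-compiled); with do_eager_mode=True it runs eagerly and use_xla must be False.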
class TensorFlowBenchmark(Benchmark):
    args: TensorFlowBenchmarkArguments
    configs: PretrainedConfig
    framework: str = "TensorFlow"

    @property
    def framework_version(self):
        return tf.__version__
    def _inference_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _inference = self._prepare_inference_func(model_name, batch_size, sequence_length)
        return self._measure_speed(_inference)

    def _train_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _train = self._prepare_train_func(model_name, batch_size, sequence_length)
        return self._measure_speed(_train)

    def _inference_memory(self, model_name: str, batch_size: int, sequence_length: int):
        # initialize GPU on separate process
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], True)
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _inference = self._prepare_inference_func(model_name, batch_size, sequence_length)
        return self._measure_memory(_inference)

    def _train_memory(self, model_name: str, batch_size: int, sequence_length: int):
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], True)
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _train = self._prepare_train_func(model_name, batch_size, sequence_length)
        return self._measure_memory(_train)
    def _prepare_inference_func(self, model_name: str, batch_size: int, sequence_length: int) -> Callable[[], None]:
        config = self.config_dict[model_name]

        if self.args.fp16:
            raise NotImplementedError("Mixed precision is currently not supported.")

        has_model_class_in_config = (
            hasattr(config, "architectures")
            and isinstance(config.architectures, list)
            and len(config.architectures) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                model_class = "TF" + config.architectures[0]  # prepend 'TF' for tensorflow model
                transformers_module = __import__("transformers", fromlist=[model_class])
                model_cls = getattr(transformers_module, model_class)
                model = model_cls(config)
            except ImportError:
                raise ImportError(
                    f"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
                    " set `--only_pretrain_model` or `args.only_pretrain_model=True`."
                )
        else:
            model = TF_MODEL_MAPPING[config.__class__](config)

        # encoder-decoder has vocab size saved differently
        vocab_size = config.vocab_size if hasattr(config, "vocab_size") else config.encoder.vocab_size
        input_ids = random_input_ids(batch_size, sequence_length, vocab_size)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_decoder_forward():
            return model(input_ids, decoder_input_ids=input_ids, training=False)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_forward():
            return model(input_ids, training=False)

        _inference = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward

        return _inference
    def _prepare_train_func(self, model_name: str, batch_size: int, sequence_length: int) -> Callable[[], None]:
        config = self.config_dict[model_name]

        if self.args.eager_mode is not False:
            raise ValueError("Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.")

        if self.args.fp16:
            raise NotImplementedError("Mixed precision is currently not supported.")

        has_model_class_in_config = (
            hasattr(config, "architectures")
            and isinstance(config.architectures, list)
            and len(config.architectures) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                model_class = "TF" + config.architectures[0]  # prepend 'TF' for tensorflow model
                transformers_module = __import__("transformers", fromlist=[model_class])
                model_cls = getattr(transformers_module, model_class)
                model = model_cls(config)
            except ImportError:
                raise ImportError(
                    f"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
                    " set `--only_pretrain_model` or `args.only_pretrain_model=True`."
                )
        else:
            model = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](config)

        # encoder-decoder has vocab size saved differently
        vocab_size = config.vocab_size if hasattr(config, "vocab_size") else config.encoder.vocab_size
        input_ids = random_input_ids(batch_size, sequence_length, vocab_size)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_decoder_train():
            loss = model(input_ids, decoder_input_ids=input_ids, labels=input_ids, training=True)[0]
            gradients = tf.gradients(loss, model.trainable_variables)
            return gradients

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_train():
            loss = model(input_ids, labels=input_ids, training=True)[0]
            gradients = tf.gradients(loss, model.trainable_variables)
            return gradients

        _train = encoder_decoder_train if config.is_encoder_decoder else encoder_train

        return _train
    def _measure_speed(self, func) -> float:
        with self.args.strategy.scope():
            try:
                if self.args.is_tpu or self.args.use_xla:
                    # run additional 10 times to stabilize compilation for tpu
                    logger.info("Do inference on TPU. Running model 5 times to stabilize compilation")
                    timeit.repeat(func, repeat=1, number=5)

                # as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average
                runtimes = timeit.repeat(
                    func,
                    repeat=self.args.repeat,
                    number=10,
                )

                return min(runtimes) / 10.0
            except ResourceExhaustedError as e:
                self.print_fn(f"Doesn't fit on GPU. {e}")
    def _measure_memory(self, func: Callable[[], None]) -> [Memory, MemorySummary]:
        logger.info(
            "Note that TensorFlow allocates more memory than "
            "it might need to speed up computation. "
            "The memory reported here corresponds to the memory "
            "reported by `nvidia-smi`, which can vary depending "
            "on total available memory on the GPU that is used."
        )
        with self.args.strategy.scope():
            try:
                if self.args.trace_memory_line_by_line:
                    if not self.args.eager_mode:
                        raise ValueError(
                            "`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory"
                            " consumption line by line."
                        )
                    trace = start_memory_tracing("transformers")

                if self.args.is_tpu:
                    # tpu
                    raise NotImplementedError(
                        "Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking"
                        " with `args.memory=False`"
                    )
                elif self.args.is_gpu:
                    # gpu
                    if not is_py3nvml_available():
                        logger.warning(
                            "py3nvml not installed, we won't log GPU memory usage. "
                            "Install py3nvml (pip install py3nvml) to log information about GPU."
                        )
                        memory = "N/A"
                    else:
                        logger.info(
                            "Measuring total GPU usage on GPU device. Make sure to not have additional processes"
                            " running on the same GPU."
                        )
                        # init nvml
                        nvml.nvmlInit()
                        func()
                        handle = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx)
                        meminfo = nvml.nvmlDeviceGetMemoryInfo(handle)
                        max_bytes_in_use = meminfo.used
                        memory = Memory(max_bytes_in_use)
                        # shutdown nvml
                        nvml.nvmlShutdown()
                else:
                    # cpu
                    if self.args.trace_memory_line_by_line:
                        logger.info(
                            "When enabling line by line tracing, the max peak memory for CPU is inaccurate in"
                            " TensorFlow."
                        )
                        memory = None
                    else:
                        memory_bytes = measure_peak_memory_cpu(func)
                        memory = Memory(memory_bytes) if isinstance(memory_bytes, int) else memory_bytes

                if self.args.trace_memory_line_by_line:
                    summary = stop_memory_tracing(trace)
                    if memory is None:
                        memory = summary.total
                else:
                    summary = None

                return memory, summary
            except ResourceExhaustedError as e:
                self.print_fn(f"Doesn't fit on GPU. {e}")
                return "N/A", None
| 67 |
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_focalnet": ["FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FocalNetConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_focalnet"] = [
        "FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FocalNetForImageClassification",
        "FocalNetForMaskedImageModeling",
        "FocalNetBackbone",
        "FocalNetModel",
        "FocalNetPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
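# With the _LazyModule above in place, importing from this package stays cheap:
# submodules are only materialized on first attribute access, e.g. (sketch):
#
#     from transformers import FocalNetConfig   # does not pull in torch modeling code
#     from transformers import FocalNetModel    # triggers the torch-dependent import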
| 319 | 0 |
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class BeitImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_reduce_labels=False,
    ):
        size = size if size is not None else {"height": 20, "width": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_reduce_labels = do_reduce_labels

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_reduce_labels": self.do_reduce_labels,
        }


def prepare_semantic_single_inputs():
    dataset = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")

    image = Image.open(dataset[0]["file"])
    map = Image.open(dataset[1]["file"])

    return image, map


def prepare_semantic_batch_inputs():
    ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")

    image1 = Image.open(ds[0]["file"])
    map1 = Image.open(ds[1]["file"])
    image2 = Image.open(ds[2]["file"])
    map2 = Image.open(ds[3]["file"])

    return [image1, image2], [map1, map2]
@require_torch
@require_vision
class BeitImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = BeitImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = BeitImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 20, "width": 20})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})
        self.assertEqual(image_processor.do_reduce_labels, False)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, crop_size=84, reduce_labels=True
        )
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})
        self.assertEqual(image_processor.do_reduce_labels, True)

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        tester = self.image_processor_tester
        single_shape = (1, tester.num_channels, tester.crop_size["height"], tester.crop_size["width"])
        batch_shape = (tester.batch_size, tester.num_channels, tester.crop_size["height"], tester.crop_size["width"])

        # create random PIL images
        image_inputs = prepare_image_inputs(tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(encoded_images.shape, single_shape)

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(encoded_images.shape, batch_shape)

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        tester = self.image_processor_tester
        single_shape = (1, tester.num_channels, tester.crop_size["height"], tester.crop_size["width"])
        batch_shape = (tester.batch_size, tester.num_channels, tester.crop_size["height"], tester.crop_size["width"])

        # create random numpy tensors
        image_inputs = prepare_image_inputs(tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(encoded_images.shape, single_shape)

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(encoded_images.shape, batch_shape)

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        tester = self.image_processor_tester
        single_shape = (1, tester.num_channels, tester.crop_size["height"], tester.crop_size["width"])
        batch_shape = (tester.batch_size, tester.num_channels, tester.crop_size["height"], tester.crop_size["width"])

        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(encoded_images.shape, single_shape)

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(encoded_images.shape, batch_shape)

    def test_call_segmentation_maps(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        tester = self.image_processor_tester
        single_pixel_shape = (1, tester.num_channels, tester.crop_size["height"], tester.crop_size["width"])
        single_label_shape = (1, tester.crop_size["height"], tester.crop_size["width"])
        batch_pixel_shape = (tester.batch_size, tester.num_channels, tester.crop_size["height"], tester.crop_size["width"])
        batch_label_shape = (tester.batch_size, tester.crop_size["height"], tester.crop_size["width"])

        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(tester, equal_resolution=False, torchify=True)
        maps = []
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
            maps.append(torch.zeros(image.shape[-2:]).long())

        # Test not batched input
        encoding = image_processing(image_inputs[0], maps[0], return_tensors="pt")
        self.assertEqual(encoding["pixel_values"].shape, single_pixel_shape)
        self.assertEqual(encoding["labels"].shape, single_label_shape)
        self.assertEqual(encoding["labels"].dtype, torch.long)
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)

        # Test batched
        encoding = image_processing(image_inputs, maps, return_tensors="pt")
        self.assertEqual(encoding["pixel_values"].shape, batch_pixel_shape)
        self.assertEqual(encoding["labels"].shape, batch_label_shape)
        self.assertEqual(encoding["labels"].dtype, torch.long)
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)

        # Test not batched input (PIL images)
        image, segmentation_map = prepare_semantic_single_inputs()
        encoding = image_processing(image, segmentation_map, return_tensors="pt")
        self.assertEqual(encoding["pixel_values"].shape, single_pixel_shape)
        self.assertEqual(encoding["labels"].shape, single_label_shape)
        self.assertEqual(encoding["labels"].dtype, torch.long)
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)

        # Test batched input (PIL images)
        images, segmentation_maps = prepare_semantic_batch_inputs()
        encoding = image_processing(images, segmentation_maps, return_tensors="pt")
        self.assertEqual(
            encoding["pixel_values"].shape,
            (2, tester.num_channels, tester.crop_size["height"], tester.crop_size["width"]),
        )
        self.assertEqual(encoding["labels"].shape, (2, tester.crop_size["height"], tester.crop_size["width"]))
        self.assertEqual(encoding["labels"].dtype, torch.long)
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)

    def test_reduce_labels(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)

        # ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150
        image, map = prepare_semantic_single_inputs()
        encoding = image_processing(image, map, return_tensors="pt")
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 150)

        image_processing.do_reduce_labels = True
        encoding = image_processing(image, map, return_tensors="pt")
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)
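# A minimal usage sketch of the processor under test (`image` and `map` are
# assumed PIL inputs like the ADE20k fixtures above):
#
#     from transformers import BeitImageProcessor
#
#     processor = BeitImageProcessor(do_reduce_labels=True)
#     encoding = processor(images=image, segmentation_maps=map, return_tensors="pt")
#     # with do_reduce_labels=True the background class 0 is remapped to 255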
| 68 |
'''simple docstring'''
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
logger = logging.get_logger(__name__)


Prediction = Dict[str, Any]
Predictions = List[Prediction]
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ObjectDetectionPipeline(Pipeline):
    """
    Object detection pipeline: predicts bounding boxes of objects and their classes.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        if self.framework == "tf":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")

        requires_backends(self, "vision")
        self.check_model_type(
            dict(MODEL_FOR_OBJECT_DETECTION_MAPPING.items() + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items())
        )

    def _sanitize_parameters(self, **kwargs):
        postprocess_kwargs = {}
        if "threshold" in kwargs:
            postprocess_kwargs["threshold"] = kwargs["threshold"]
        return {}, {}, postprocess_kwargs

    def __call__(self, *args, **kwargs) -> Union[Predictions, List[Prediction]]:
        return super().__call__(*args, **kwargs)

    def preprocess(self, image):
        image = load_image(image)
        target_size = torch.IntTensor([[image.height, image.width]])
        inputs = self.image_processor(images=[image], return_tensors="pt")
        if self.tokenizer is not None:
            inputs = self.tokenizer(text=inputs["words"], boxes=inputs["boxes"], return_tensors="pt")
        inputs["target_size"] = target_size
        return inputs

    def _forward(self, model_inputs):
        target_size = model_inputs.pop("target_size")
        outputs = self.model(**model_inputs)
        model_outputs = outputs.__class__({"target_size": target_size, **outputs})
        if self.tokenizer is not None:
            model_outputs["bbox"] = model_inputs["bbox"]
        return model_outputs

    def postprocess(self, model_outputs, threshold=0.9):
        target_size = model_outputs["target_size"]
        if self.tokenizer is not None:
            # This is a LayoutLMForTokenClassification variant.
            # The OCR got the boxes and the model classified the words.
            height, width = target_size[0].tolist()

            def unnormalize(bbox):
                return self._get_bounding_box(
                    torch.Tensor(
                        [
                            (width * bbox[0] / 1000),
                            (height * bbox[1] / 1000),
                            (width * bbox[2] / 1000),
                            (height * bbox[3] / 1000),
                        ]
                    )
                )

            scores, classes = model_outputs["logits"].squeeze(0).softmax(dim=-1).max(dim=-1)
            labels = [self.model.config.id2label[prediction] for prediction in classes.tolist()]
            boxes = [unnormalize(bbox) for bbox in model_outputs["bbox"].squeeze(0)]
            keys = ["score", "label", "box"]
            annotation = [dict(zip(keys, vals)) for vals in zip(scores.tolist(), labels, boxes) if vals[0] > threshold]
        else:
            # This is a regular ForObjectDetectionModel
            raw_annotations = self.image_processor.post_process_object_detection(model_outputs, threshold, target_size)
            raw_annotation = raw_annotations[0]
            scores = raw_annotation["scores"]
            labels = raw_annotation["labels"]
            boxes = raw_annotation["boxes"]

            raw_annotation["scores"] = scores.tolist()
            raw_annotation["labels"] = [self.model.config.id2label[label.item()] for label in labels]
            raw_annotation["boxes"] = [self._get_bounding_box(box) for box in boxes]

            # {"scores": [...], ...} --> [{"score":x, ...}, ...]
            keys = ["score", "label", "box"]
            annotation = [
                dict(zip(keys, vals))
                for vals in zip(raw_annotation["scores"], raw_annotation["labels"], raw_annotation["boxes"])
            ]

        return annotation

    def _get_bounding_box(self, box: "torch.Tensor") -> Dict[str, int]:
        if self.framework != "pt":
            raise ValueError("The ObjectDetectionPipeline is only available in PyTorch.")
        xmin, ymin, xmax, ymax = box.int().tolist()
        bbox = {
            "xmin": xmin,
            "ymin": ymin,
            "xmax": xmax,
            "ymax": ymax,
        }
        return bbox
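# A minimal usage sketch (the default model is left unspecified; URL illustrative):
#
#     from transformers import pipeline
#
#     detector = pipeline("object-detection")
#     detector("http://images.cocodataset.org/val2017/000000039769.jpg")
#     # -> [{"score": 0.99, "label": "cat",
#     #      "box": {"xmin": ..., "ymin": ..., "xmax": ..., "ymax": ...}}, ...]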
| 319 | 0 |
"""simple docstring"""
from __future__ import annotations
import time
from math import sqrt
# 1 for manhattan, 0 for euclidean
HEURISTIC = 0

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right

TPosition = tuple[int, int]
class Node:
    def __init__(self, pos_x, pos_y, goal_x, goal_y, g_cost, parent) -> None:
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.h_cost = self.calculate_heuristic()
        self.f_cost = self.g_cost + self.h_cost

    def calculate_heuristic(self) -> float:
        dy = self.pos_x - self.goal_x
        dx = self.pos_y - self.goal_y
        if HEURISTIC == 1:
            return abs(dy) + abs(dx)
        else:
            return sqrt(dy**2 + dx**2)

    def __lt__(self, other) -> bool:
        return self.f_cost < other.f_cost
class AStar:
    def __init__(self, start, goal):
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99999, None)
        self.open_nodes = [self.start]
        self.closed_nodes = []
        self.reached = False

    def search(self) -> list[TPosition]:
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)
            if current_node.pos == self.target.pos:
                return self.retrace_path(current_node)
            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)
            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue
                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))
                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)
        return [self.start.pos]

    def get_successors(self, parent) -> list[Node]:
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(
                Node(
                    pos_x,
                    pos_y,
                    self.target.pos_y,
                    self.target.pos_x,
                    parent.g_cost + 1,
                    parent,
                )
            )
        return successors

    def retrace_path(self, node) -> list[TPosition]:
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path
class BidirectionalAStar:
    def __init__(self, start, goal) -> None:
        self.fwd_astar = AStar(start, goal)
        self.bwd_astar = AStar(goal, start)
        self.reached = False

    def search(self) -> list[TPosition]:
        while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
            self.fwd_astar.open_nodes.sort()
            self.bwd_astar.open_nodes.sort()
            current_fwd_node = self.fwd_astar.open_nodes.pop(0)
            current_bwd_node = self.bwd_astar.open_nodes.pop(0)
            if current_bwd_node.pos == current_fwd_node.pos:
                return self.retrace_bidirectional_path(current_fwd_node, current_bwd_node)
            self.fwd_astar.closed_nodes.append(current_fwd_node)
            self.bwd_astar.closed_nodes.append(current_bwd_node)
            self.fwd_astar.target = current_bwd_node
            self.bwd_astar.target = current_fwd_node
            successors = {
                self.fwd_astar: self.fwd_astar.get_successors(current_fwd_node),
                self.bwd_astar: self.bwd_astar.get_successors(current_bwd_node),
            }
            for astar in [self.fwd_astar, self.bwd_astar]:
                for child_node in successors[astar]:
                    if child_node in astar.closed_nodes:
                        continue
                    if child_node not in astar.open_nodes:
                        astar.open_nodes.append(child_node)
                    else:
                        # retrieve the best current path
                        better_node = astar.open_nodes.pop(astar.open_nodes.index(child_node))
                        if child_node.g_cost < better_node.g_cost:
                            astar.open_nodes.append(child_node)
                        else:
                            astar.open_nodes.append(better_node)
        return [self.fwd_astar.start.pos]

    def retrace_bidirectional_path(self, fwd_node, bwd_node) -> list[TPosition]:
        fwd_path = self.fwd_astar.retrace_path(fwd_node)
        bwd_path = self.bwd_astar.retrace_path(bwd_node)
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
__UpperCamelCase = (0, 0)
__UpperCamelCase = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
__UpperCamelCase = time.time()
__UpperCamelCase = AStar(init, goal)
__UpperCamelCase = a_star.search()
__UpperCamelCase = time.time() - start_time
print(F"""AStar execution time = {end_time:f} seconds""")
__UpperCamelCase = time.time()
__UpperCamelCase = BidirectionalAStar(init, goal)
__UpperCamelCase = time.time() - bd_start_time
print(F"""BidirectionalAStar execution time = {bd_end_time:f} seconds""")
| 69 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "YituTech/conv-bert-base": "https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json",
    "YituTech/conv-bert-medium-small": (
        "https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json"
    ),
    "YituTech/conv-bert-small": "https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json",
    # See all ConvBERT models at https://huggingface.co/models?filter=convbert
}
class ConvBertConfig(PretrainedConfig):
    model_type = "convbert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        embedding_size=768,
        head_ratio=2,
        conv_kernel_size=9,
        num_groups=1,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs
        )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.embedding_size = embedding_size
        self.head_ratio = head_ratio
        self.conv_kernel_size = conv_kernel_size
        self.num_groups = num_groups
        self.classifier_dropout = classifier_dropout
class ConvBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
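# Usage sketch (hedged; mirrors the standard PretrainedConfig API):
#
#   config = ConvBertConfig(hidden_size=768, num_hidden_layers=12)
#   config.save_pretrained("./convbert-config")  # writes config.json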
| 319 | 0 |
'''simple docstring'''
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
    from PIL import Image
else:

    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass


@is_pipeline_test
@require_vision
class ZeroShotImageClassificationPipelineTests(unittest.TestCase):
    @require_torch
    def test_small_model_pt(self):
        image_classifier = pipeline(
            model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification",
        )
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["a", "b", "c"])
        # The floating scores are so close, we enter floating error approximation and the order is not guaranteed across
        # python and torch versions.
        self.assertIn(
            nested_simplify(output),
            [
                [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}],
                [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "c"}, {"score": 0.333, "label": "b"}],
            ],
        )

        output = image_classifier([image] * 5, candidate_labels=["A", "B", "C"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            # Three labels per image; only the label order is unstable, hence ANY(str).
            [
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
            ]
            * 5,
        )
    @require_tf
    def test_small_model_tf(self):
        image_classifier = pipeline(
            model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification", framework="tf"
        )
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["a", "b", "c"])
        self.assertEqual(
            nested_simplify(output),
            [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}],
        )

        output = image_classifier([image] * 5, candidate_labels=["A", "B", "C"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
            ]
            * 5,
        )
    @slow
    @require_torch
    def test_large_model_pt(self):
        image_classifier = pipeline(
            task="zero-shot-image-classification",
            model="openai/clip-vit-base-patch32",
        )
        # This is an image of 2 cats with remotes and no planes
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["cat", "plane", "remote"])
        self.assertEqual(
            nested_simplify(output),
            [
                {"score": 0.511, "label": "remote"},
                {"score": 0.485, "label": "cat"},
                {"score": 0.004, "label": "plane"},
            ],
        )

        output = image_classifier([image] * 5, candidate_labels=["cat", "plane", "remote"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.511, "label": "remote"},
                    {"score": 0.485, "label": "cat"},
                    {"score": 0.004, "label": "plane"},
                ],
            ]
            * 5,
        )

    @slow
    @require_tf
    def test_large_model_tf(self):
        image_classifier = pipeline(
            task="zero-shot-image-classification", model="openai/clip-vit-base-patch32", framework="tf"
        )
        # This is an image of 2 cats with remotes and no planes
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["cat", "plane", "remote"])
        self.assertEqual(
            nested_simplify(output),
            [
                {"score": 0.511, "label": "remote"},
                {"score": 0.485, "label": "cat"},
                {"score": 0.004, "label": "plane"},
            ],
        )

        output = image_classifier([image] * 5, candidate_labels=["cat", "plane", "remote"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.511, "label": "remote"},
                    {"score": 0.485, "label": "cat"},
                    {"score": 0.004, "label": "plane"},
                ],
            ]
            * 5,
        )
| 70 |
'''simple docstring'''
from __future__ import annotations
def check_polygon(nums: list[float]) -> bool:
    if len(nums) < 2:
        raise ValueError("Monogons and Digons are not polygons in the Euclidean space")
    if any(i <= 0 for i in nums):
        raise ValueError("All values must be greater than 0")
    copy_nums = nums.copy()
    copy_nums.sort()
    return copy_nums[-1] < sum(copy_nums[:-1])
if __name__ == "__main__":
import doctest
doctest.testmod()
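# Worked examples (computed by hand from the polygon inequality above):
#   check_polygon([6, 10, 5])  -> True   (10 < 6 + 5)
#   check_polygon([3, 7, 13])  -> False  (13 >= 3 + 7)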
| 319 | 0 |
import unittest
import numpy as np
import torch
from diffusers import DDIMPipeline, DDIMScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class DDIMPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = DDIMPipeline
    params = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "num_images_per_prompt",
        "latents",
        "callback",
        "callback_steps",
    }
    batch_params = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
    test_cpu_offload = False

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        scheduler = DDIMScheduler()
        components = {"unet": unet, "scheduler": scheduler}
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "batch_size": 1,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs
    def test_inference(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 32, 32, 3))
        expected_slice = np.array(
            [1.000e00, 5.717e-01, 4.717e-01, 1.000e00, 0.000e00, 1.000e00, 3.000e-04, 0.000e00, 9.000e-04]
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)

    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    def test_save_load_local(self):
        super().test_save_load_local(expected_max_difference=3e-3)

    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class DDIMPipelineIntegrationTests(unittest.TestCase):
    def test_inference_cifar10(self):
        model_id = "google/ddpm-cifar10-32"
        unet = UNet2DModel.from_pretrained(model_id)
        scheduler = DDIMScheduler()
        ddim = DDIMPipeline(unet=unet, scheduler=scheduler)
        ddim.to(torch_device)
        ddim.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ddim(generator=generator, eta=0.0, output_type="numpy").images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.1723, 0.1617, 0.1600, 0.1626, 0.1497, 0.1513, 0.1505, 0.1442, 0.1453])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_ema_bedroom(self):
        model_id = "google/ddpm-ema-bedroom-256"
        unet = UNet2DModel.from_pretrained(model_id)
        scheduler = DDIMScheduler.from_pretrained(model_id)
        ddpm = DDIMPipeline(unet=unet, scheduler=scheduler)
        ddpm.to(torch_device)
        ddpm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ddpm(generator=generator, output_type="numpy").images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.0060, 0.0201, 0.0344, 0.0024, 0.0018, 0.0002, 0.0022, 0.0000, 0.0069])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 71 |
'''simple docstring'''
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(__file__), "src"))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='''ignore''', category=FutureWarning)
def pytest_configure(config):
    config.addinivalue_line(
        "markers", "is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested"
    )
    config.addinivalue_line(
        "markers", "is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested"
    )
    config.addinivalue_line("markers", "is_pipeline_test: mark test to run only when pipelines are tested")
    config.addinivalue_line("markers", "is_staging_test: mark test to run only in the staging environment")
    config.addinivalue_line("markers", "accelerate_tests: mark test that require accelerate")
    config.addinivalue_line("markers", "tool_tests: mark the tool tests that are run on their specific schedule")


def pytest_addoption(parser):
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)


def pytest_sessionfinish(session, exitstatus):
    # If no tests are collected, pytest exists with code 5, which makes the CI fail.
    if exitstatus == 5:
        session.exitstatus = 0
# Doctest custom flag to ignore output.
IGNORE_RESULT = doctest.register_optionflag("IGNORE_RESULT")

OutputChecker = doctest.OutputChecker


class CustomOutputChecker(OutputChecker):
    def check_output(self, want, got, optionflags):
        if IGNORE_RESULT & optionflags:
            return True
        return OutputChecker.check_output(self, want, got, optionflags)


doctest.OutputChecker = CustomOutputChecker
_pytest.doctest.DoctestModule = HfDoctestModule
doctest.DocTestParser = HfDocTestParser
| 319 | 0 |
"""simple docstring"""
import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

TOKENIZER_CLASSES = {name: getattr(transformers, name + "Fast") for name in SLOW_TO_FAST_CONVERTERS}
def convert_slow_checkpoint_to_fast(tokenizer_name, checkpoint_name, dump_path, force_download):
    if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
        raise ValueError(f"Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys())}.")

    if tokenizer_name is None:
        tokenizer_names = TOKENIZER_CLASSES
    else:
        tokenizer_names = {tokenizer_name: getattr(transformers, tokenizer_name + "Fast")}

    logger.info(f"Loading tokenizer classes: {tokenizer_names}")

    for tokenizer_name in tokenizer_names:
        tokenizer_class = TOKENIZER_CLASSES[tokenizer_name]

        add_prefix = True
        if checkpoint_name is None:
            checkpoint_names = list(tokenizer_class.max_model_input_sizes.keys())
        else:
            checkpoint_names = [checkpoint_name]

        logger.info(f"For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}")

        for checkpoint in checkpoint_names:
            logger.info(f"Loading {tokenizer_class.__class__.__name__} {checkpoint}")

            # Load tokenizer
            tokenizer = tokenizer_class.from_pretrained(checkpoint, force_download=force_download)

            # Save fast tokenizer
            logger.info(f"Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}")

            # For organization names we create sub-directories
            if "/" in checkpoint:
                checkpoint_directory, checkpoint_prefix_name = checkpoint.split("/")
                dump_path_full = os.path.join(dump_path, checkpoint_directory)
            elif add_prefix:
                checkpoint_prefix_name = checkpoint
                dump_path_full = dump_path
            else:
                checkpoint_prefix_name = None
                dump_path_full = dump_path

            logger.info(f"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}")

            if checkpoint in list(tokenizer.pretrained_vocab_files_map.values())[0]:
                file_path = list(tokenizer.pretrained_vocab_files_map.values())[0][checkpoint]
                next_char = file_path.split(checkpoint)[-1][0]
                if next_char == "/":
                    dump_path_full = os.path.join(dump_path_full, checkpoint_prefix_name)
                    checkpoint_prefix_name = None

                logger.info(f"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}")

            file_names = tokenizer.save_pretrained(
                dump_path_full, legacy_format=False, filename_prefix=checkpoint_prefix_name
            )
            logger.info(f"=> File names {file_names}")

            for file_name in file_names:
                if not file_name.endswith("tokenizer.json"):
                    os.remove(file_name)
                    logger.info(f"=> removing {file_name}")
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--dump_path''', default=None, type=str, required=True, help='''Path to output generated fast tokenizer files.'''
)
parser.add_argument(
'''--tokenizer_name''',
default=None,
type=str,
help=(
F"""Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will """
'''download and convert all the checkpoints from AWS.'''
),
)
parser.add_argument(
'''--checkpoint_name''',
default=None,
type=str,
help='''Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.''',
)
parser.add_argument(
'''--force_download''',
action='''store_true''',
help='''Re-download checkpoints.''',
)
lowerCAmelCase__ = parser.parse_args()
convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
| 72 |
'''simple docstring'''
import heapq
import sys
import numpy as np
TPos = tuple[int, int]
class PriorityQueue:
    def __init__(self):
        self.elements = []
        self.set = set()

    def minkey(self):
        if not self.empty():
            return self.elements[0][0]
        else:
            return float("inf")

    def empty(self):
        return len(self.elements) == 0

    def put(self, item, priority):
        if item not in self.set:
            heapq.heappush(self.elements, (priority, item))
            self.set.add(item)
        else:
            # update
            # print("update", item)
            temp = []
            (pri, x) = heapq.heappop(self.elements)
            while x != item:
                temp.append((pri, x))
                (pri, x) = heapq.heappop(self.elements)
            temp.append((priority, item))
            for (pro, xxx) in temp:
                heapq.heappush(self.elements, (pro, xxx))

    def remove_element(self, item):
        if item in self.set:
            self.set.remove(item)
            temp = []
            (pro, x) = heapq.heappop(self.elements)
            while x != item:
                temp.append((pro, x))
                (pro, x) = heapq.heappop(self.elements)
            for (prito, yyy) in temp:
                heapq.heappush(self.elements, (prito, yyy))

    def top_show(self):
        return self.elements[0][1]

    def get(self):
        (priority, item) = heapq.heappop(self.elements)
        self.set.remove(item)
        return (priority, item)
def consistent_heuristic(P: TPos, goal: TPos):
    # euclidean distance
    a = np.array(P)
    b = np.array(goal)
    return np.linalg.norm(a - b)


def heuristic_2(P: TPos, goal: TPos):
    # integer division by time variable
    return consistent_heuristic(P, goal) // t


def heuristic_1(P: TPos, goal: TPos):
    # manhattan distance
    return abs(P[0] - goal[0]) + abs(P[1] - goal[1])


def key(start: TPos, i: int, goal: TPos, g_function: dict):
    ans = g_function[start] + W1 * heuristics[i](start, goal)
    return ans
def do_something(back_pointer, goal, start):
    grid = np.chararray((n, n))
    for i in range(n):
        for j in range(n):
            grid[i][j] = "*"

    for i in range(n):
        for j in range(n):
            if (j, (n - 1) - i) in blocks:
                grid[i][j] = "#"

    grid[0][(n - 1)] = "-"
    x = back_pointer[goal]
    while x != start:
        (x_c, y_c) = x
        # print(x)
        grid[(n - 1) - y_c][x_c] = "-"
        x = back_pointer[x]
    grid[(n - 1)][0] = "-"

    for i in range(n):
        for j in range(n):
            if (i, j) == (0, n - 1):
                print(grid[i][j], end=" ")
                print("<-- End position", end=" ")
            else:
                print(grid[i][j], end=" ")
        print()
    print("^")
    print("Start position")
    print()
    print("# is an obstacle")
    print("- is the path taken by algorithm")
    print("PATH TAKEN BY THE ALGORITHM IS:-")
    x = back_pointer[goal]
    while x != start:
        print(x, end=" ")
        x = back_pointer[x]
    print(x)
    sys.exit()
def valid(p: TPos):
    if p[0] < 0 or p[0] > n - 1:
        return False
    if p[1] < 0 or p[1] > n - 1:
        return False
    return True
def expand_state(s, j, visited, g_function, close_list_anchor, close_list_inad, open_list, back_pointer):
    for itera in range(n_heuristic):
        open_list[itera].remove_element(s)
    # print("s", s)
    # print("j", j)
    (x, y) = s
    left = (x - 1, y)
    right = (x + 1, y)
    up = (x, y + 1)
    down = (x, y - 1)

    for neighbours in [left, right, up, down]:
        if neighbours not in blocks:
            if valid(neighbours) and neighbours not in visited:
                # print("neighbour", neighbours)
                visited.add(neighbours)
                back_pointer[neighbours] = -1
                g_function[neighbours] = float("inf")

            if valid(neighbours) and g_function[neighbours] > g_function[s] + 1:
                g_function[neighbours] = g_function[s] + 1
                back_pointer[neighbours] = s
                if neighbours not in close_list_anchor:
                    open_list[0].put(neighbours, key(neighbours, 0, goal, g_function))
                    if neighbours not in close_list_inad:
                        for var in range(1, n_heuristic):
                            if key(neighbours, var, goal, g_function) <= W2 * key(neighbours, 0, goal, g_function):
                                open_list[j].put(neighbours, key(neighbours, var, goal, g_function))
def make_common_ground():
    some_list = []
    for x in range(1, 5):
        for y in range(1, 6):
            some_list.append((x, y))

    for x in range(15, 20):
        some_list.append((x, 17))

    for x in range(10, 19):
        for y in range(1, 15):
            some_list.append((x, y))

    # L block
    for x in range(1, 4):
        for y in range(12, 19):
            some_list.append((x, y))
    for x in range(3, 13):
        for y in range(16, 19):
            some_list.append((x, y))
    return some_list
heuristics = {0: consistent_heuristic, 1: heuristic_1, 2: heuristic_2}

blocks_blk = [
    (0, 1),
    (1, 1),
    (2, 1),
    (3, 1),
    (4, 1),
    (5, 1),
    (6, 1),
    (7, 1),
    (8, 1),
    (9, 1),
    (10, 1),
    (11, 1),
    (12, 1),
    (13, 1),
    (14, 1),
    (15, 1),
    (16, 1),
    (17, 1),
    (18, 1),
    (19, 1),
]
blocks_all = make_common_ground()

blocks = blocks_blk
# hyper parameters
W1 = 1
W2 = 1
n = 20
n_heuristic = 3  # one consistent and two other inconsistent

# start and end destination
start = (0, 0)
goal = (n - 1, n - 1)

t = 1
def multi_a_star(start: TPos, goal: TPos, n_heuristic: int):
    g_function = {start: 0, goal: float("inf")}
    back_pointer = {start: -1, goal: -1}
    open_list = []
    visited = set()

    for i in range(n_heuristic):
        open_list.append(PriorityQueue())
        open_list[i].put(start, key(start, i, goal, g_function))

    close_list_anchor: list[int] = []
    close_list_inad: list[int] = []
    while open_list[0].minkey() < float("inf"):
        for i in range(1, n_heuristic):
            # print(open_list[0].minkey(), open_list[i].minkey())
            if open_list[i].minkey() <= W2 * open_list[0].minkey():
                global t
                t += 1
                if g_function[goal] <= open_list[i].minkey():
                    if g_function[goal] < float("inf"):
                        do_something(back_pointer, goal, start)
                else:
                    get_s = open_list[i].top_show()
                    visited.add(get_s)
                    expand_state(
                        get_s,
                        i,
                        visited,
                        g_function,
                        close_list_anchor,
                        close_list_inad,
                        open_list,
                        back_pointer,
                    )
                    close_list_inad.append(get_s)
            else:
                if g_function[goal] <= open_list[0].minkey():
                    if g_function[goal] < float("inf"):
                        do_something(back_pointer, goal, start)
                else:
                    get_s = open_list[0].top_show()
                    visited.add(get_s)
                    expand_state(
                        get_s,
                        0,
                        visited,
                        g_function,
                        close_list_anchor,
                        close_list_inad,
                        open_list,
                        back_pointer,
                    )
                    close_list_anchor.append(get_s)
    print("No path found to goal")
    print()
    for i in range(n - 1, -1, -1):
        for j in range(n):
            if (j, i) in blocks:
                print("#", end=" ")
            elif (j, i) in back_pointer:
                if (j, i) == (n - 1, n - 1):
                    print("*", end=" ")
                else:
                    print("-", end=" ")
            else:
                print("*", end=" ")
            if (j, i) == (n - 1, n - 1):
                print("<-- End position", end=" ")
        print()
    print("^")
    print("Start position")
    print()
    print("# is an obstacle")
    print("- is the path taken by algorithm")


if __name__ == "__main__":
    multi_a_star(start, goal, n_heuristic)
| 319 | 0 |
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
pytestmark = pytest.mark.integration


@pytest.mark.parametrize("path", ["paws", "csv"])
def test_inspect_dataset(path, tmp_path):
    inspect_dataset(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)


@pytest.mark.filterwarnings("ignore:inspect_metric is deprecated:FutureWarning")
@pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning")
@pytest.mark.parametrize("path", ["accuracy"])
def test_inspect_metric(path, tmp_path):
    inspect_metric(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)


@pytest.mark.parametrize(
    "path, config_name, expected_splits",
    [
        ("squad", "plain_text", ["train", "validation"]),
        ("dalle-mini/wit", "dalle-mini--wit", ["train"]),
        ("paws", "labeled_final", ["train", "test", "validation"]),
    ],
)
def test_get_dataset_config_info(path, config_name, expected_splits):
    info = get_dataset_config_info(path, config_name=config_name)
    assert info.config_name == config_name
    assert list(info.splits.keys()) == expected_splits


@pytest.mark.parametrize(
    "path, config_name, expected_exception",
    [
        ("paws", None, ValueError),
    ],
)
def test_get_dataset_config_info_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_config_info(path, config_name=config_name)


@pytest.mark.parametrize(
    "path, expected",
    [
        ("squad", "plain_text"),
        ("acronym_identification", "default"),
        ("lhoestq/squad", "plain_text"),
        ("lhoestq/test", "default"),
        ("lhoestq/demo1", "lhoestq--demo1"),
        ("dalle-mini/wit", "dalle-mini--wit"),
    ],
)
def test_get_dataset_config_names(path, expected):
    config_names = get_dataset_config_names(path)
    assert expected in config_names


@pytest.mark.parametrize(
    "path, expected_configs, expected_splits_in_first_config",
    [
        ("squad", ["plain_text"], ["train", "validation"]),
        ("dalle-mini/wit", ["dalle-mini--wit"], ["train"]),
        ("paws", ["labeled_final", "labeled_swap", "unlabeled_final"], ["train", "test", "validation"]),
    ],
)
def test_get_dataset_infos_no_config_name(path, expected_configs, expected_splits_in_first_config):
    infos = get_dataset_infos(path)
    assert list(infos.keys()) == expected_configs
    expected_config = expected_configs[0]
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits_in_first_config


@pytest.mark.parametrize(
    "path, expected_config, expected_splits",
    [
        ("squad", "plain_text", ["train", "validation"]),
        ("dalle-mini/wit", "dalle-mini--wit", ["train"]),
        ("paws", "labeled_final", ["train", "test", "validation"]),
    ],
)
def test_get_dataset_info(path, expected_config, expected_splits):
    infos = get_dataset_infos(path)
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits


@pytest.mark.parametrize(
    "path, config_name, expected_exception",
    [
        ("paws", None, ValueError),
    ],
)
def test_get_dataset_split_names_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_split_names(path, config_name=config_name)
| 73 |
'''simple docstring'''
def solution(numerator: int = 1, digit: int = 1000) -> int:
    the_digit = 1
    longest_list_length = 0

    for divide_by_number in range(numerator, digit + 1):
        has_been_divided: list[int] = []
        now_divide = numerator
        for _ in range(1, digit + 1):
            if now_divide in has_been_divided:
                if longest_list_length < len(has_been_divided):
                    longest_list_length = len(has_been_divided)
                    the_digit = divide_by_number
            else:
                has_been_divided.append(now_divide)
            now_divide = now_divide * 10 % divide_by_number

    return the_digit
# Tests
if __name__ == "__main__":
import doctest
doctest.testmod()
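# Worked example: for denominators up to 10 the longest recurring decimal cycle
# belongs to 1/7 = 0.(142857), so solution(1, 10) returns 7.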
| 319 | 0 |
"""simple docstring"""
class lowerCAmelCase_ :
'''simple docstring'''
def __init__( self : int ,A_ : int ) -> Union[str, Any]:
A = n
A = [None] * self.n
A = 0 # index of the first element
A = 0
A = 0
def __len__( self : int ) -> int:
return self.size
def _SCREAMING_SNAKE_CASE ( self : Any ) -> bool:
return self.size == 0
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Tuple:
return False if self.is_empty() else self.array[self.front]
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : List[Any] ) -> int:
if self.size >= self.n:
raise Exception('QUEUE IS FULL' )
A = data
A = (self.rear + 1) % self.n
self.size += 1
return self
def _SCREAMING_SNAKE_CASE ( self : int ) -> List[Any]:
if self.size == 0:
raise Exception('UNDERFLOW' )
A = self.array[self.front]
A = None
A = (self.front + 1) % self.n
self.size -= 1
return temp
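# Usage sketch:
#
#   queue = CircularQueue(3)
#   queue.enqueue("a").enqueue("b")
#   queue.first()    # -> "a"
#   queue.dequeue()  # -> "a"; len(queue) is now 1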
| 74 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
    "configuration_vision_encoder_decoder": ["VisionEncoderDecoderConfig", "VisionEncoderDecoderOnnxConfig"]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vision_encoder_decoder"] = ["VisionEncoderDecoderModel"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_vision_encoder_decoder"] = ["TFVisionEncoderDecoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_vision_encoder_decoder"] = ["FlaxVisionEncoderDecoderModel"]
if TYPE_CHECKING:
from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 319 | 0 |
'''simple docstring'''
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def convert_pytorch_checkpoint_to_tf(model: BertModel, ckpt_dir: str, model_name: str):
    """
    Args:
        model: BertModel Pytorch model instance to be converted
        ckpt_dir: Tensorflow model directory
        model_name: model name
    """
    tensors_to_transpose = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value")

    var_map = (
        ("layer.", "layer_"),
        ("word_embeddings.weight", "word_embeddings"),
        ("position_embeddings.weight", "position_embeddings"),
        ("token_type_embeddings.weight", "token_type_embeddings"),
        (".", "/"),
        ("LayerNorm/weight", "LayerNorm/gamma"),
        ("LayerNorm/bias", "LayerNorm/beta"),
        ("weight", "kernel"),
    )

    if not os.path.isdir(ckpt_dir):
        os.makedirs(ckpt_dir)

    state_dict = model.state_dict()

    def to_tf_var_name(name: str):
        for patt, repl in iter(var_map):
            name = name.replace(patt, repl)
        return f"bert/{name}"

    def create_tf_var(tensor: np.ndarray, name: str, session: tf.Session):
        tf_dtype = tf.dtypes.as_dtype(tensor.dtype)
        tf_var = tf.get_variable(dtype=tf_dtype, shape=tensor.shape, name=name, initializer=tf.zeros_initializer())
        session.run(tf.variables_initializer([tf_var]))
        session.run(tf_var)
        return tf_var

    tf.reset_default_graph()
    with tf.Session() as session:
        for var_name in state_dict:
            tf_name = to_tf_var_name(var_name)
            torch_tensor = state_dict[var_name].numpy()
            if any(x in var_name for x in tensors_to_transpose):
                torch_tensor = torch_tensor.T
            tf_var = create_tf_var(tensor=torch_tensor, name=tf_name, session=session)
            tf.keras.backend.set_value(tf_var, torch_tensor)
            tf_weight = session.run(tf_var)
            print(f"Successfully created {tf_name}: {np.allclose(tf_weight, torch_tensor)}")

        saver = tf.train.Saver(tf.trainable_variables())
        saver.save(session, os.path.join(ckpt_dir, model_name.replace("-", "_") + ".ckpt"))


def main(raw_args=None):
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name", type=str, required=True, help="model name e.g. bert-base-uncased")
    parser.add_argument(
        "--cache_dir", type=str, default=None, required=False, help="Directory containing pytorch model"
    )
    parser.add_argument("--pytorch_model_path", type=str, required=True, help="/path/to/<pytorch-model-name>.bin")
    parser.add_argument("--tf_cache_dir", type=str, required=True, help="Directory in which to save tensorflow model")
    args = parser.parse_args(raw_args)

    model = BertModel.from_pretrained(
        pretrained_model_name_or_path=args.model_name,
        state_dict=torch.load(args.pytorch_model_path),
        cache_dir=args.cache_dir,
    )

    convert_pytorch_checkpoint_to_tf(model=model, ckpt_dir=args.tf_cache_dir, model_name=args.model_name)
if __name__ == "__main__":
main()
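# Example invocation (file paths are illustrative, not from this repo):
#
#   python convert_bert_pytorch_checkpoint_to_original_tf.py \
#       --model_name bert-base-uncased \
#       --pytorch_model_path ./pytorch_model.bin \
#       --tf_cache_dir ./tf_checkpoint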
| 75 |
'''simple docstring'''
import fire
from utils import calculate_rouge, save_json
def calculate_rouge_path(pred_path, tgt_path, save_path=None, **kwargs):
    output_lns = [x.strip() for x in open(pred_path).readlines()]
    reference_lns = [x.strip() for x in open(tgt_path).readlines()][: len(output_lns)]
    metrics = calculate_rouge(output_lns, reference_lns, **kwargs)
    if save_path is not None:
        save_json(metrics, save_path, indent=None)
    return metrics  # these print nicely
if __name__ == "__main__":
fire.Fire(calculate_rouge_path)
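# CLI sketch via python-fire (file names are illustrative):
#
#   python calculate_rouge.py predictions.txt references.txt --save_path=metrics.json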
| 319 | 0 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class EfficientFormerImageProcessorTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        image_size=224,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
        }
@require_torch
@require_vision
class EfficientFormerImageProcessorTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ViTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_proc_tester = EfficientFormerImageProcessorTester(self)

    @property
    def image_processor_dict(self):
        return self.image_proc_tester.prepare_image_processor_dict()

    def test_image_proc_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "image_mean"))
        self.assertTrue(hasattr(image_processor, "image_std"))
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_resize"))
        self.assertTrue(hasattr(image_processor, "size"))

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )
| 76 |
'''simple docstring'''
def bubble_sort(list_data: list, length: int = 0) -> list:
    length = length or len(list_data)
    swapped = False
    for i in range(length - 1):
        if list_data[i] > list_data[i + 1]:
            list_data[i], list_data[i + 1] = list_data[i + 1], list_data[i]
            swapped = True

    return list_data if not swapped else bubble_sort(list_data, length - 1)
if __name__ == "__main__":
import doctest
doctest.testmod()
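# Worked example:
#   bubble_sort([0, 5, 2, 3, 2])  -> [0, 2, 2, 3, 5]
# Each recursive pass bubbles the largest remaining element to index length - 1.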
| 319 | 0 |
"""simple docstring"""
import random
def rabin_miller(num: int) -> bool:
    s = num - 1
    t = 0
    while s % 2 == 0:
        s = s // 2
        t += 1
    for _ in range(5):
        a = random.randrange(2, num - 1)
        v = pow(a, s, num)
        if v != 1:
            i = 0
            while v != (num - 1):
                if i == t - 1:
                    return False
                else:
                    i = i + 1
                    v = (v**2) % num
    return True


def is_prime_low_num(num: int) -> bool:
    if num < 2:
        return False

    low_primes = [
2,
3,
5,
7,
11,
13,
17,
19,
23,
29,
31,
37,
41,
43,
47,
53,
59,
61,
67,
71,
73,
79,
83,
89,
97,
101,
103,
107,
109,
113,
127,
131,
137,
139,
149,
151,
157,
163,
167,
173,
179,
181,
191,
193,
197,
199,
211,
223,
227,
229,
233,
239,
241,
251,
257,
263,
269,
271,
277,
281,
283,
293,
307,
311,
313,
317,
331,
337,
347,
349,
353,
359,
367,
373,
379,
383,
389,
397,
401,
409,
419,
421,
431,
433,
439,
443,
449,
457,
461,
463,
467,
479,
487,
491,
499,
503,
509,
521,
523,
541,
547,
557,
563,
569,
571,
577,
587,
593,
599,
601,
607,
613,
617,
619,
631,
641,
643,
647,
653,
659,
661,
673,
677,
683,
691,
701,
709,
719,
727,
733,
739,
743,
751,
757,
761,
769,
773,
787,
797,
809,
811,
821,
823,
827,
829,
839,
853,
857,
859,
863,
877,
881,
883,
887,
907,
911,
919,
929,
937,
941,
947,
953,
967,
971,
977,
983,
991,
997,
]
    if num in low_primes:
        return True

    for prime in low_primes:
        if (num % prime) == 0:
            return False
    return rabin_miller(num)


def generate_large_prime(keysize: int = 1024) -> int:
    while True:
        num = random.randrange(2 ** (keysize - 1), 2 ** (keysize))
        if is_prime_low_num(num):
            return num


if __name__ == "__main__":
    num = generate_large_prime()
    print(("Prime number:", num))
    print(("is_prime_low_num:", is_prime_low_num(num)))
| 77 |
'''simple docstring'''
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
rename_keys_prefix = [
('''bert.bert''', '''visual_bert'''),
('''bert.cls''', '''cls'''),
('''bert.classifier''', '''cls'''),
('''token_type_embeddings_visual''', '''visual_token_type_embeddings'''),
('''position_embeddings_visual''', '''visual_position_embeddings'''),
('''projection''', '''visual_projection'''),
]
ACCEPTABLE_CHECKPOINTS = [
'''nlvr2_coco_pre_trained.th''',
'''nlvr2_fine_tuned.th''',
'''nlvr2_pre_trained.th''',
'''vcr_coco_pre_train.th''',
'''vcr_fine_tune.th''',
'''vcr_pre_train.th''',
'''vqa_coco_pre_trained.th''',
'''vqa_fine_tuned.th''',
'''vqa_pre_trained.th''',
]
def load_state_dict(checkpoint_path):
    sd = torch.load(checkpoint_path, map_location="cpu")
    return sd


def get_new_dict(d, config, rename_keys_prefix=rename_keys_prefix):
    new_d = OrderedDict()
    new_d["visual_bert.embeddings.position_ids"] = torch.arange(config.max_position_embeddings).expand((1, -1))
    # detector_d = OrderedDict()
    for key in d:
        if "detector" in key:
            # detector_d[key.replace('detector.','')] = d[key]
            continue
        new_key = key
        for name_pair in rename_keys_prefix:
            new_key = new_key.replace(name_pair[0], name_pair[1])
        new_d[new_key] = d[key]
        if key == "bert.cls.predictions.decoder.weight":
            # Old bert code didn't have `decoder.bias`, but was added separately
            new_d["cls.predictions.decoder.bias"] = new_d["cls.predictions.bias"]
    return new_d
@torch.no_grad()
def convert_visual_bert_checkpoint(checkpoint_path, pytorch_dump_folder_path):
    assert (
        checkpoint_path.split("/")[-1] in ACCEPTABLE_CHECKPOINTS
    ), f"The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}."

    # Get Config
    if "pre" in checkpoint_path:
        model_type = "pretraining"
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 512}
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
        elif "nlvr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 1024}
        else:
            raise NotImplementedError(f"No implementation found for `{checkpoint_path}`.")
    else:
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 512}
            model_type = "multichoice"
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
            model_type = "vqa_advanced"
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048, "num_labels": 3129}
            model_type = "vqa"
        elif "nlvr" in checkpoint_path:
            config_params = {
                "visual_embedding_dim": 1024,
                "num_labels": 2,
            }
            model_type = "nlvr"

    config = VisualBertConfig(**config_params)

    # Load State Dict
    state_dict = load_state_dict(checkpoint_path)

    new_state_dict = get_new_dict(state_dict, config)

    if model_type == "pretraining":
        model = VisualBertForPreTraining(config)
    elif model_type == "vqa":
        model = VisualBertForQuestionAnswering(config)
    elif model_type == "nlvr":
        model = VisualBertForVisualReasoning(config)
    elif model_type == "multichoice":
        model = VisualBertForMultipleChoice(config)

    model.load_state_dict(new_state_dict)
    # Save Checkpoints
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''orig_checkpoint_path''', type=str, help='''A path to .th on local filesystem.''')
parser.add_argument('''pytorch_dump_folder_path''', type=str, help='''Path to the output PyTorch model.''')
    args = parser.parse_args()
convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
| 319 | 0 |
"""simple docstring"""
def circle_sort(collection: list) -> list:
    if len(collection) < 2:
        return collection

    def circle_sort_util(collection: list, low: int, high: int) -> bool:
        swapped = False

        if low == high:
            return swapped

        left = low
        right = high

        while left < right:
            if collection[left] > collection[right]:
                collection[left], collection[right] = (
                    collection[right],
                    collection[left],
                )
                swapped = True

            left += 1
            right -= 1

        if left == right and collection[left] > collection[right + 1]:
            collection[left], collection[right + 1] = (
                collection[right + 1],
                collection[left],
            )
            swapped = True

        mid = low + int((high - low) / 2)
        left_swap = circle_sort_util(collection, low, mid)
        right_swap = circle_sort_util(collection, mid + 1, high)

        return swapped or left_swap or right_swap

    is_not_sorted = True

    while is_not_sorted is True:
        is_not_sorted = circle_sort_util(collection, 0, len(collection) - 1)

    return collection
if __name__ == "__main__":
snake_case_ = input("""Enter numbers separated by a comma:\n""").strip()
snake_case_ = [int(item) for item in user_input.split(""",""")]
print(circle_sort(unsorted))
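# Quick self-checks for the implementation above (illustrative):
#   circle_sort([5, 3, 1, 4, 2]) -> [1, 2, 3, 4, 5]
#   circle_sort([])              -> []  (inputs shorter than 2 are returned unchanged)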
| 78 |
'''simple docstring'''
from itertools import permutations
def is_substring_divisible(num: tuple) -> bool:
    """Check whether an ordering of the digits 0-9 has the substring divisibility property."""
    if num[3] % 2 != 0:
        return False

    if (num[2] + num[3] + num[4]) % 3 != 0:
        return False

    if num[5] % 5 != 0:
        return False

    tests = [7, 11, 13, 17]
    for i, test in enumerate(tests):
        if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
            return False
    return True


def solution(n: int = 10) -> int:
    """Sum of all 0- to 9-pandigital numbers with the substring divisibility property."""
    return sum(
        int("".join(map(str, num)))
        for num in permutations(range(n))
        if is_substring_divisible(num)
    )
if __name__ == "__main__":
print(f'{solution() = }')
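if __name__ == "__main__":
    # Sanity check (illustrative): 1406357289 is the example given in the
    # Project Euler 43 statement, so it must satisfy the divisibility test.
    assert is_substring_divisible((1, 4, 0, 6, 3, 5, 7, 2, 8, 9))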
| 319 | 0 |
'''simple docstring'''
import inspect
from typing import Optional, Union
import numpy as np
import PIL
import torch
from torch.nn import functional as F
from torchvision import transforms
from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
DPMSolverMultistepScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import (
PIL_INTERPOLATION,
randn_tensor,
)
def preprocess(image, w, h):
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]

    if isinstance(image[0], PIL.Image.Image):
        image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image]
        image = np.concatenate(image, axis=0)
        image = np.array(image).astype(np.float32) / 255.0
        image = image.transpose(0, 3, 1, 2)
        image = 2.0 * image - 1.0
        image = torch.from_numpy(image)
    elif isinstance(image[0], torch.Tensor):
        image = torch.cat(image, dim=0)
    return image
def slerp(t, v0, v1, DOT_THRESHOLD=0.9995):
    """Spherical linear interpolation between two (torch or numpy) vectors."""
    inputs_are_torch = False
    if not isinstance(v0, np.ndarray):
        inputs_are_torch = True
        input_device = v0.device
        v0 = v0.cpu().numpy()
        v1 = v1.cpu().numpy()

    dot = np.sum(v0 * v1 / (np.linalg.norm(v0) * np.linalg.norm(v1)))
    if np.abs(dot) > DOT_THRESHOLD:
        v2 = (1 - t) * v0 + t * v1
    else:
        theta_0 = np.arccos(dot)
        sin_theta_0 = np.sin(theta_0)
        theta_t = theta_0 * t
        sin_theta_t = np.sin(theta_t)
        s0 = np.sin(theta_0 - theta_t) / sin_theta_0
        s1 = sin_theta_t / sin_theta_0
        v2 = s0 * v0 + s1 * v1

    if inputs_are_torch:
        v2 = torch.from_numpy(v2).to(input_device)

    return v2
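# Sanity notes for slerp (illustrative): the endpoints are recovered exactly,
#   slerp(0.0, v0, v1) -> v0   and   slerp(1.0, v0, v1) -> v1,
# while intermediate t values travel along the great-circle arc between them.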
def spherical_dist_loss(x, y):
    x = F.normalize(x, dim=-1)
    y = F.normalize(y, dim=-1)
    return (x - y).norm(dim=-1).div(2).arcsin().pow(2).mul(2)


def set_requires_grad(model, value):
    for param in model.parameters():
        param.requires_grad = value
class CLIPGuidedImagesMixingStableDiffusion(DiffusionPipeline):
    # Community pipeline: CLIP-guided mixing of a content and a style image.
    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        clip_model: CLIPModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[PNDMScheduler, LMSDiscreteScheduler, DDIMScheduler, DPMSolverMultistepScheduler],
        feature_extractor: CLIPFeatureExtractor,
        coca_model=None,
        coca_tokenizer=None,
        coca_transform=None,
    ):
        super().__init__()
        self.register_modules(
            vae=vae,
            text_encoder=text_encoder,
            clip_model=clip_model,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            feature_extractor=feature_extractor,
            coca_model=coca_model,
            coca_tokenizer=coca_tokenizer,
            coca_transform=coca_transform,
        )
        self.feature_extractor_size = (
            feature_extractor.size
            if isinstance(feature_extractor.size, int)
            else feature_extractor.size["shortest_edge"]
        )
        self.normalize = transforms.Normalize(mean=feature_extractor.image_mean, std=feature_extractor.image_std)
        set_requires_grad(self.text_encoder, False)
        set_requires_grad(self.clip_model, False)
    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)

    def freeze_vae(self):
        set_requires_grad(self.vae, False)

    def unfreeze_vae(self):
        set_requires_grad(self.vae, True)

    def freeze_unet(self):
        set_requires_grad(self.unet, False)

    def unfreeze_unet(self):
        set_requires_grad(self.unet, True)

    def get_timesteps(self, num_inference_steps, strength, device):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)

        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]

        return timesteps, num_inference_steps - t_start
    def prepare_latents(self, image, timestep, batch_size, dtype, device, generator=None):
        if not isinstance(image, torch.Tensor):
            raise ValueError(f"`image` has to be of type `torch.Tensor` but is {type(image)}")

        image = image.to(device=device, dtype=dtype)

        if isinstance(generator, list):
            init_latents = [
                self.vae.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size)
            ]
            init_latents = torch.cat(init_latents, dim=0)
        else:
            init_latents = self.vae.encode(image).latent_dist.sample(generator)

        # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
        init_latents = 0.18215 * init_latents
        init_latents = init_latents.repeat_interleave(batch_size, dim=0)

        noise = randn_tensor(init_latents.shape, generator=generator, device=device, dtype=dtype)

        # get latents
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
        latents = init_latents

        return latents
    def get_image_description(self, image):
        transformed_image = self.coca_transform(image).unsqueeze(0)
        with torch.no_grad(), torch.cuda.amp.autocast():
            generated = self.coca_model.generate(transformed_image.to(device=self.device, dtype=self.coca_model.dtype))
        generated = self.coca_tokenizer.decode(generated[0].cpu().numpy())
        return generated.split("<end_of_text>")[0].replace("<start_of_text>", "").rstrip(" .,")
    def get_clip_image_embeddings(self, image, batch_size):
        clip_image_input = self.feature_extractor.preprocess(image)
        clip_image_features = torch.from_numpy(clip_image_input["pixel_values"][0]).unsqueeze(0).to(self.device).half()
        image_embeddings_clip = self.clip_model.get_image_features(clip_image_features)
        image_embeddings_clip = image_embeddings_clip / image_embeddings_clip.norm(p=2, dim=-1, keepdim=True)
        image_embeddings_clip = image_embeddings_clip.repeat_interleave(batch_size, dim=0)
        return image_embeddings_clip
    @torch.enable_grad()
    def cond_fn(
        self,
        latents,
        timestep,
        index,
        text_embeddings,
        noise_pred_original,
        original_image_embeddings_clip,
        clip_guidance_scale,
    ):
        latents = latents.detach().requires_grad_()

        latent_model_input = self.scheduler.scale_model_input(latents, timestep)

        # predict the noise residual
        noise_pred = self.unet(latent_model_input, timestep, encoder_hidden_states=text_embeddings).sample

        if isinstance(self.scheduler, (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler)):
            alpha_prod_t = self.scheduler.alphas_cumprod[timestep]
            beta_prod_t = 1 - alpha_prod_t
            # compute predicted original sample from predicted noise also called
            # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
            pred_original_sample = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5

            fac = torch.sqrt(beta_prod_t)
            sample = pred_original_sample * (fac) + latents * (1 - fac)
        elif isinstance(self.scheduler, LMSDiscreteScheduler):
            sigma = self.scheduler.sigmas[index]
            sample = latents - sigma * noise_pred
        else:
            raise ValueError(f"scheduler type {type(self.scheduler)} not supported")

        # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
        sample = 1 / 0.18215 * sample
        image = self.vae.decode(sample).sample
        image = (image / 2 + 0.5).clamp(0, 1)

        image = transforms.Resize(self.feature_extractor_size)(image)
        image = self.normalize(image).to(latents.dtype)

        image_embeddings_clip = self.clip_model.get_image_features(image)
        image_embeddings_clip = image_embeddings_clip / image_embeddings_clip.norm(p=2, dim=-1, keepdim=True)

        loss = spherical_dist_loss(image_embeddings_clip, original_image_embeddings_clip).mean() * clip_guidance_scale

        grads = -torch.autograd.grad(loss, latents)[0]

        if isinstance(self.scheduler, LMSDiscreteScheduler):
            latents = latents.detach() + grads * (sigma**2)
            noise_pred = noise_pred_original
        else:
            noise_pred = noise_pred_original - torch.sqrt(beta_prod_t) * grads
        return noise_pred, latents
    @torch.no_grad()
    def __call__(
        self,
        content_image: Union[torch.FloatTensor, PIL.Image.Image],
        style_image: Union[torch.FloatTensor, PIL.Image.Image],
        content_prompt: Optional[str] = None,
        style_prompt: Optional[str] = None,
        height: Optional[int] = 512,
        width: Optional[int] = 512,
        noise_strength: float = 0.6,
        num_inference_steps: Optional[int] = 50,
        guidance_scale: Optional[float] = 7.5,
        batch_size: Optional[int] = 1,
        eta: float = 0.0,
        clip_guidance_scale: Optional[float] = 100,
        generator: Optional[torch.Generator] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        slerp_latent_style_strength: float = 0.8,
        slerp_prompt_style_strength: float = 0.1,
        slerp_clip_image_style_strength: float = 0.1,
    ):
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(f"You have passed {batch_size} batch_size, but only {len(generator)} generators.")

        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")

        if isinstance(generator, torch.Generator) and batch_size > 1:
            generator = [generator] + [None] * (batch_size - 1)

        coca_is_none = [
            ("model", self.coca_model is None),
            ("tokenizer", self.coca_tokenizer is None),
            ("transform", self.coca_transform is None),
        ]
        coca_is_none = [x[0] for x in coca_is_none if x[1]]
        coca_is_none_str = ", ".join(coca_is_none)
        # generate prompts with coca model if prompt is None
        if content_prompt is None:
            if len(coca_is_none):
                raise ValueError(
                    f"Content prompt is None and CoCa [{coca_is_none_str}] is None."
                    f"Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline."
                )
            content_prompt = self.get_image_description(content_image)
        if style_prompt is None:
            if len(coca_is_none):
                raise ValueError(
                    f"Style prompt is None and CoCa [{coca_is_none_str}] is None."
                    f" Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline."
                )
            style_prompt = self.get_image_description(style_image)

        # get prompt text embeddings for content and style
        content_text_input = self.tokenizer(
            content_prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            truncation=True,
            return_tensors="pt",
        )
        content_text_embeddings = self.text_encoder(content_text_input.input_ids.to(self.device))[0]

        style_text_input = self.tokenizer(
            style_prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            truncation=True,
            return_tensors="pt",
        )
        style_text_embeddings = self.text_encoder(style_text_input.input_ids.to(self.device))[0]

        text_embeddings = slerp(slerp_prompt_style_strength, content_text_embeddings, style_text_embeddings)

        # duplicate text embeddings for each generation per prompt
        text_embeddings = text_embeddings.repeat_interleave(batch_size, dim=0)

        # set timesteps
        accepts_offset = "offset" in set(inspect.signature(self.scheduler.set_timesteps).parameters.keys())
        extra_set_kwargs = {}
        if accepts_offset:
            extra_set_kwargs["offset"] = 1

        self.scheduler.set_timesteps(num_inference_steps, **extra_set_kwargs)
        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        self.scheduler.timesteps.to(self.device)

        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, noise_strength, self.device)
        latent_timestep = timesteps[:1].repeat(batch_size)

        # Preprocess image
        preprocessed_content_image = preprocess(content_image, width, height)
        content_latents = self.prepare_latents(
            preprocessed_content_image, latent_timestep, batch_size, text_embeddings.dtype, self.device, generator
        )

        preprocessed_style_image = preprocess(style_image, width, height)
        style_latents = self.prepare_latents(
            preprocessed_style_image, latent_timestep, batch_size, text_embeddings.dtype, self.device, generator
        )

        latents = slerp(slerp_latent_style_strength, content_latents, style_latents)

        if clip_guidance_scale > 0:
            content_clip_image_embedding = self.get_clip_image_embeddings(content_image, batch_size)
            style_clip_image_embedding = self.get_clip_image_embeddings(style_image, batch_size)
            clip_image_embeddings = slerp(
                slerp_clip_image_style_strength, content_clip_image_embedding, style_clip_image_embedding
            )

        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0

        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            max_length = content_text_input.input_ids.shape[-1]
            uncond_input = self.tokenizer([""], padding="max_length", max_length=max_length, return_tensors="pt")
            uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
            # duplicate unconditional embeddings for each generation per prompt
            uncond_embeddings = uncond_embeddings.repeat_interleave(batch_size, dim=0)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            text_embeddings = torch.cat([uncond_embeddings, text_embeddings])

        # get the initial random noise unless the user supplied it

        # Unlike in other pipelines, latents need to be generated in the target device
        # for 1-to-1 results reproducibility with the CompVis implementation.
        # However this currently doesn't work in `mps`.
        latents_shape = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
        latents_dtype = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not work reproducibly on mps
                latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to(
                    self.device
                )
            else:
                latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
        else:
            if latents.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            latents = latents.to(self.device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta

        # check if the scheduler accepts generator
        accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
        if accepts_generator:
            extra_step_kwargs["generator"] = generator

        with self.progress_bar(total=num_inference_steps):
            for i, t in enumerate(timesteps):
                # expand the latents if we are doing classifier free guidance
                latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
                latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

                # predict the noise residual
                noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample

                # perform classifier free guidance
                if do_classifier_free_guidance:
                    noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                    noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

                # perform clip guidance
                if clip_guidance_scale > 0:
                    text_embeddings_for_guidance = (
                        text_embeddings.chunk(2)[1] if do_classifier_free_guidance else text_embeddings
                    )
                    noise_pred, latents = self.cond_fn(
                        latents,
                        t,
                        i,
                        text_embeddings_for_guidance,
                        noise_pred,
                        clip_image_embeddings,
                        clip_guidance_scale,
                    )

                # compute the previous noisy sample x_t -> x_t-1
                latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample

        # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
        latents = 1 / 0.18215 * latents
        image = self.vae.decode(latents).sample
        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image, None)

        return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=None)
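# Standalone note on the classifier-free-guidance update in `__call__` above:
#   noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# With guidance_scale == 1.0 this reduces to the text-conditioned prediction
# (no extra guidance); larger values push the sample harder toward the prompt.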
| 79 |
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''allenai/longformer-base-4096''': '''https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json''',
'''allenai/longformer-large-4096''': (
'''https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json'''
),
'''allenai/longformer-large-4096-finetuned-triviaqa''': (
'''https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json'''
),
'''allenai/longformer-base-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json'''
),
'''allenai/longformer-large-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json'''
),
},
'''merges_file''': {
'''allenai/longformer-base-4096''': '''https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt''',
'''allenai/longformer-large-4096''': (
'''https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt'''
),
'''allenai/longformer-large-4096-finetuned-triviaqa''': (
'''https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt'''
),
'''allenai/longformer-base-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt'''
),
'''allenai/longformer-large-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''allenai/longformer-base-4096''': 4096,
'''allenai/longformer-large-4096''': 4096,
'''allenai/longformer-large-4096-finetuned-triviaqa''': 4096,
'''allenai/longformer-base-4096-extra.pos.embd.only''': 4096,
'''allenai/longformer-large-4096-extra.pos.embd.only''': 4096,
}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode():
    """
    Returns a mapping from utf-8 bytes to unicode strings, avoiding the
    whitespace/control characters that BPE code barfs on.
    """
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))


def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word, given as a tuple of symbols."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
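# Illustrative sanity checks for the two helpers above:
#   bytes_to_unicode() has 256 entries, all distinct, and printable ASCII such
#   as "A" maps to itself.
#   get_pairs(("l", "o", "w")) == {("l", "o"), ("o", "w")}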
class LongformerTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+")
    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word
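    # Illustrative trace of the greedy merge loop in `bpe` above (hypothetical
    # ranks): with bpe_ranks = {("l", "o"): 0, ("lo", "w"): 1}, the token "low"
    # goes ("l", "o", "w") -> ("lo", "w") -> ("low",) and is cached as "low".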
    def _tokenize(self, text):
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)
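# Special-token layout produced by `build_inputs_with_special_tokens` above
# (RoBERTa-style, which Longformer reuses):
#   single sequence:    <s> A </s>
#   pair of sequences:  <s> A </s></s> B </s>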
| 319 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import _LazyModule


_import_structure = {"tokenization_wav2vec2_phoneme": ["Wav2Vec2PhonemeCTCTokenizer"]}

if TYPE_CHECKING:
    from .tokenization_wav2vec2_phoneme import Wav2Vec2PhonemeCTCTokenizer
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 80 |
'''simple docstring'''
def remove_digit(num: int) -> int:
    """Return the biggest number obtainable by removing exactly one digit from `num`."""
    if not isinstance(num, int):
        raise TypeError("only integers accepted as input")
    num_str = str(abs(num))
    num_transpositions = [list(num_str) for _ in range(len(num_str))]
    for index in range(len(num_str)):
        num_transpositions[index].pop(index)
    return max(
        int("".join(list(transposition))) for transposition in num_transpositions
    )
if __name__ == "__main__":
__import__('''doctest''').testmod()
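if __name__ == "__main__":
    # Illustrative check: deleting one digit of 152 yields 52, 12 or 15,
    # so the maximum is 52.
    assert remove_digit(152) == 52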
| 319 | 0 |
"""simple docstring"""
from math import log
from scipy.constants import Boltzmann, physical_constants
T = 300  # TEMPERATURE (unit = K)


def builtin_voltage(
    donor_conc: float,  # donor concentration
    acceptor_conc: float,  # acceptor concentration
    intrinsic_conc: float,  # intrinsic concentration
) -> float:
    """Calculate the builtin voltage of a pn junction at temperature T."""
    if donor_conc <= 0:
        raise ValueError("Donor concentration should be positive")
    elif acceptor_conc <= 0:
        raise ValueError("Acceptor concentration should be positive")
    elif intrinsic_conc <= 0:
        raise ValueError("Intrinsic concentration should be positive")
    elif donor_conc <= intrinsic_conc:
        raise ValueError(
            "Donor concentration should be greater than intrinsic concentration"
        )
    elif acceptor_conc <= intrinsic_conc:
        raise ValueError(
            "Acceptor concentration should be greater than intrinsic concentration"
        )
    else:
        return (
            Boltzmann
            * T
            * log((donor_conc * acceptor_conc) / intrinsic_conc**2)
            / physical_constants["electron volt"][0]
        )
if __name__ == "__main__":
import doctest
doctest.testmod()
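# Example (typical silicon values, n_i ~ 1.5e10 cm^-3):
#   builtin_voltage(donor_conc=1e17, acceptor_conc=1e17, intrinsic_conc=1.5e10)
# evaluates to roughly 0.81 V at T = 300 K.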
| 81 |
'''simple docstring'''
from __future__ import annotations
import math
def default_matrix_multiplication(a: list, b: list) -> list:
    """Multiplication only for 2x2 matrices."""
    if len(a) != 2 or len(a[0]) != 2 or len(b) != 2 or len(b[0]) != 2:
        raise Exception("Matrices are not 2x2")
    new_matrix = [
        [a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]],
        [a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]],
    ]
    return new_matrix


def matrix_addition(matrix_a: list, matrix_b: list) -> list:
    return [
        [matrix_a[row][col] + matrix_b[row][col] for col in range(len(matrix_a[row]))]
        for row in range(len(matrix_a))
    ]


def matrix_subtraction(matrix_a: list, matrix_b: list) -> list:
    return [
        [matrix_a[row][col] - matrix_b[row][col] for col in range(len(matrix_a[row]))]
        for row in range(len(matrix_a))
    ]


def split_matrix(a: list) -> tuple[list, list, list, list]:
    if len(a) % 2 != 0 or len(a[0]) % 2 != 0:
        raise Exception("Odd matrices are not supported!")

    matrix_length = len(a)
    mid = matrix_length // 2

    top_right = [[a[i][j] for j in range(mid, matrix_length)] for i in range(mid)]
    bot_right = [
        [a[i][j] for j in range(mid, matrix_length)] for i in range(mid, matrix_length)
    ]

    top_left = [[a[i][j] for j in range(mid)] for i in range(mid)]
    bot_left = [[a[i][j] for j in range(mid)] for i in range(mid, matrix_length)]

    return top_left, top_right, bot_left, bot_right


def matrix_dimensions(matrix: list) -> tuple[int, int]:
    return len(matrix), len(matrix[0])


def print_matrix(matrix: list) -> None:
    print("\n".join(str(line) for line in matrix))


def actual_strassen(matrix_a: list, matrix_b: list) -> list:
    """
    Recursive function to compute the product of two matrices with Strassen's
    algorithm; it only supports square matrices whose size is a power of 2.
    """
    if matrix_dimensions(matrix_a) == (2, 2):
        return default_matrix_multiplication(matrix_a, matrix_b)

    a, b, c, d = split_matrix(matrix_a)
    e, f, g, h = split_matrix(matrix_b)

    t1 = actual_strassen(a, matrix_subtraction(f, h))
    t2 = actual_strassen(matrix_addition(a, b), h)
    t3 = actual_strassen(matrix_addition(c, d), e)
    t4 = actual_strassen(d, matrix_subtraction(g, e))
    t5 = actual_strassen(matrix_addition(a, d), matrix_addition(e, h))
    t6 = actual_strassen(matrix_subtraction(b, d), matrix_addition(g, h))
    t7 = actual_strassen(matrix_subtraction(a, c), matrix_addition(e, f))

    top_left = matrix_addition(matrix_subtraction(matrix_addition(t5, t4), t2), t6)
    top_right = matrix_addition(t1, t2)
    bot_left = matrix_addition(t3, t4)
    bot_right = matrix_subtraction(matrix_subtraction(matrix_addition(t1, t5), t3), t7)

    # construct the new matrix from our 4 quadrants
    new_matrix = []
    for i in range(len(top_right)):
        new_matrix.append(top_left[i] + top_right[i])
    for i in range(len(bot_right)):
        new_matrix.append(bot_left[i] + bot_right[i])
    return new_matrix
def strassen(matrix1: list, matrix2: list) -> list:
    if matrix_dimensions(matrix1)[1] != matrix_dimensions(matrix2)[0]:
        msg = (
            "Unable to multiply these matrices, please check the dimensions.\n"
            f"Matrix A: {matrix1}\n"
            f"Matrix B: {matrix2}"
        )
        raise Exception(msg)
    dimension1 = matrix_dimensions(matrix1)
    dimension2 = matrix_dimensions(matrix2)

    if dimension1[0] == dimension1[1] and dimension2[0] == dimension2[1]:
        return [matrix1, matrix2]

    maximum = max(*dimension1, *dimension2)
    maxim = int(math.pow(2, math.ceil(math.log2(maximum))))
    new_matrix1 = matrix1
    new_matrix2 = matrix2

    # Adding zeros to the matrices so that the arrays dimensions are the same and also
    # power of 2
    for i in range(0, maxim):
        if i < dimension1[0]:
            for _ in range(dimension1[1], maxim):
                new_matrix1[i].append(0)
        else:
            new_matrix1.append([0] * maxim)
        if i < dimension2[0]:
            for _ in range(dimension2[1], maxim):
                new_matrix2[i].append(0)
        else:
            new_matrix2.append([0] * maxim)

    final_matrix = actual_strassen(new_matrix1, new_matrix2)

    # Removing the additional zeros
    for i in range(0, maxim):
        if i < dimension1[0]:
            for _ in range(dimension2[1], maxim):
                final_matrix[i].pop()
        else:
            final_matrix.pop()
    return final_matrix
if __name__ == "__main__":
    matrix1 = [
        [2, 3, 4, 5],
        [6, 4, 3, 1],
        [2, 3, 6, 7],
        [3, 1, 2, 4],
        [2, 3, 4, 5],
        [6, 4, 3, 1],
        [2, 3, 6, 7],
        [3, 1, 2, 4],
        [2, 3, 4, 5],
        [6, 2, 3, 1],
    ]
    matrix2 = [[0, 2, 1, 1], [16, 2, 3, 3], [2, 2, 7, 7], [13, 11, 22, 4]]
    print(strassen(matrix1, matrix2))
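# Cross-check sketch (illustrative; use fresh copies of the inputs, since
# strassen() pads its arguments in place): the result should equal a naive
# triple-loop product,
#   naive[i][j] = sum(matrix1[i][k] * matrix2[k][j] for k in range(len(matrix2)))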
| 319 | 0 |
test_graph = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
def bfs(graph, s, t, parent):
    """Return True if the sink `t` is reachable from the source `s` in the residual graph."""
    visited = [False] * len(graph)
    queue = [s]
    visited[s] = True

    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u

    return visited[t]


def mincut(graph, source, sink):
    """Run Ford-Fulkerson and return the edges of the resulting minimum cut."""
    parent = [-1] * (len(graph))  # This array is filled by BFS to store the path
    max_flow = 0
    res = []
    temp = [i[:] for i in graph]  # Record original cut, copy.
    while bfs(graph, source, sink, parent):
        path_flow = float("Inf")
        s = sink

        while s != source:
            # Find the minimum value in the selected path
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]

        max_flow += path_flow
        v = sink

        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]

    for i in range(len(graph)):
        for j in range(len(graph[0])):
            if graph[i][j] == 0 and temp[i][j] > 0:
                res.append((i, j))

    return res
if __name__ == "__main__":
print(mincut(test_graph, source=0, sink=5))
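# For the capacity matrix above (the classic CLRS flow-network example), the
# expected minimum cut is [(1, 3), (4, 3), (4, 5)]: its capacities
# 12 + 7 + 4 = 23 match the maximum flow of the network.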
| 82 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
class KarrasVeOutput(BaseOutput):
    """Output class for the scheduler's step function."""

    prev_sample: torch.FloatTensor
    derivative: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None


class KarrasVeScheduler(SchedulerMixin, ConfigMixin):
    """Stochastic sampling from Karras et al. (2022), tailored to variance-expanding models."""

    order = 2

    @register_to_config
    def __init__(
        self,
        sigma_min: float = 0.02,
        sigma_max: float = 100,
        s_noise: float = 1.007,
        s_churn: float = 80,
        s_min: float = 0.05,
        s_max: float = 50,
    ):
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max

        # setable values
        self.num_inference_steps: int = None
        self.timesteps: np.IntTensor = None
        self.schedule: torch.FloatTensor = None  # sigma(t_i)

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        return sample

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        self.num_inference_steps = num_inference_steps
        timesteps = np.arange(0, self.num_inference_steps)[::-1].copy()
        self.timesteps = torch.from_numpy(timesteps).to(device)
        schedule = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in self.timesteps
        ]
        self.schedule = torch.tensor(schedule, dtype=torch.float32, device=device)

    def add_noise_to_input(
        self, sample: torch.FloatTensor, sigma: float, generator: Optional[torch.Generator] = None
    ) -> Tuple[torch.FloatTensor, float]:
        if self.config.s_min <= sigma <= self.config.s_max:
            gamma = min(self.config.s_churn / self.num_inference_steps, 2**0.5 - 1)
        else:
            gamma = 0

        # sample eps ~ N(0, S_noise^2 * I)
        eps = self.config.s_noise * randn_tensor(sample.shape, generator=generator).to(sample.device)
        sigma_hat = sigma + gamma * sigma
        sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)

        return sample_hat, sigma_hat

    def step(
        self,
        model_output: torch.FloatTensor,
        sigma_hat: float,
        sigma_prev: float,
        sample_hat: torch.FloatTensor,
        return_dict: bool = True,
    ) -> Union[KarrasVeOutput, Tuple]:
        pred_original_sample = sample_hat + sigma_hat * model_output
        derivative = (sample_hat - pred_original_sample) / sigma_hat
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative

        if not return_dict:
            return (sample_prev, derivative)

        return KarrasVeOutput(
            prev_sample=sample_prev, derivative=derivative, pred_original_sample=pred_original_sample
        )

    def step_correct(
        self,
        model_output: torch.FloatTensor,
        sigma_hat: float,
        sigma_prev: float,
        sample_hat: torch.FloatTensor,
        sample_prev: torch.FloatTensor,
        derivative: torch.FloatTensor,
        return_dict: bool = True,
    ) -> Union[KarrasVeOutput, Tuple]:
        pred_original_sample = sample_prev + sigma_prev * model_output
        derivative_corr = (sample_prev - pred_original_sample) / sigma_prev
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)

        if not return_dict:
            return (sample_prev, derivative)

        return KarrasVeOutput(
            prev_sample=sample_prev, derivative=derivative, pred_original_sample=pred_original_sample
        )

    def add_noise(self, original_samples, noise, timesteps):
        raise NotImplementedError()
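# Note on the schedule built in `set_timesteps` above: it is a geometric
# interpolation,
#   schedule[i] = sigma_max**2 * (sigma_min**2 / sigma_max**2) ** (t_i / (N - 1)),
# which runs from sigma_max**2 at the first (largest) timestep down to
# sigma_min**2 at the last one.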
| 319 | 0 |
'''simple docstring'''
def solution(limit=28123):
    """Sum of all positive integers that cannot be written as the sum of two abundant numbers."""
    sum_divs = [1] * (limit + 1)

    for i in range(2, int(limit**0.5) + 1):
        sum_divs[i * i] += i
        for k in range(i + 1, limit // i + 1):
            sum_divs[k * i] += k + i

    abundants = set()
    res = 0

    for n in range(1, limit + 1):
        if sum_divs[n] > n:
            abundants.add(n)

        if not any((n - a in abundants) for a in abundants):
            res += n

    return res
if __name__ == "__main__":
print(solution())
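# Background (Project Euler 23 statement): every integer greater than 28123 can
# be written as the sum of two abundant numbers, which is why the search stops
# at that limit. The expected result is 4179871.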
| 83 |
'''simple docstring'''
import json
import logging
import math
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from datasets import Dataset, load_dataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_FOR_MASKED_LM_MAPPING,
AutoConfig,
AutoModelForMaskedLM,
AutoTokenizer,
DataCollatorForWholeWordMask,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
logger = logging.getLogger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_FOR_MASKED_LM_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch.
    """

    model_name_or_path: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization. Don't set if you want to train a model from scratch."
            )
        },
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_overrides: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "Override some existing default config settings when a model is trained from scratch. Example: "
                "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
            )
        },
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )

    def __post_init__(self):
        if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None):
            raise ValueError(
                "--config_overrides can't be used in combination with --config_name or --model_name_or_path"
            )
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    dataset_name: Optional[str] = field(
        default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."})
    validation_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
    )
    train_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input train ref data file for whole word masking in Chinese."},
    )
    validation_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input validation ref data file for whole word masking in Chinese."},
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    validation_split_percentage: Optional[int] = field(
        default=5,
        metadata={
            "help": "The percentage of the train set used as validation set in case there's no validation split"
        },
    )
    max_seq_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated. Default to the max input length of the model."
            )
        },
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    mlm_probability: float = field(
        default=0.15, metadata={"help": "Ratio of tokens to mask for masked language modeling loss"}
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        },
    )

    def __post_init__(self):
        if self.train_file is not None:
            extension = self.train_file.split(".")[-1]
            assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file."
        if self.validation_file is not None:
            extension = self.validation_file.split(".")[-1]
            assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file."
def add_chinese_references(dataset, ref_file):
    with open(ref_file, "r", encoding="utf-8") as f:
        refs = [json.loads(line) for line in f.read().splitlines() if (len(line) > 0 and not line.isspace())]
    assert len(dataset) == len(refs)

    dataset_dict = {c: dataset[c] for c in dataset.column_names}
    dataset_dict["chinese_ref"] = refs
    return Dataset.from_dict(dataset_dict)
def SCREAMING_SNAKE_CASE( ) -> int:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
A: int = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
A , A , A: Optional[int] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
A , A , A: List[Any] = parser.parse_args_into_dataclasses()
# Detecting last checkpoint.
A: Any = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
A: Any = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None:
logger.info(
F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN )
# Log on each process the small summary:
logger.warning(
F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('''Training/evaluation parameters %s''' , __lowercase )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
A: Dict = load_dataset(data_args.dataset_name , data_args.dataset_config_name )
if "validation" not in datasets.keys():
A: int = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=F"""train[:{data_args.validation_split_percentage}%]""" , )
A: Dict = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=F"""train[{data_args.validation_split_percentage}%:]""" , )
else:
A: Any = {}
if data_args.train_file is not None:
A: int = data_args.train_file
if data_args.validation_file is not None:
A: Optional[int] = data_args.validation_file
A: List[str] = data_args.train_file.split('''.''' )[-1]
if extension == "txt":
A: int = '''text'''
A: Any = load_dataset(__lowercase , data_files=__lowercase )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
A: Dict = {
'''cache_dir''': model_args.cache_dir,
'''revision''': model_args.model_revision,
'''use_auth_token''': True if model_args.use_auth_token else None,
}
if model_args.config_name:
A: List[Any] = AutoConfig.from_pretrained(model_args.config_name , **__lowercase )
elif model_args.model_name_or_path:
A: int = AutoConfig.from_pretrained(model_args.model_name_or_path , **__lowercase )
else:
A: str = CONFIG_MAPPING[model_args.model_type]()
logger.warning('''You are instantiating a new config instance from scratch.''' )
if model_args.config_overrides is not None:
logger.info(F"""Overriding config: {model_args.config_overrides}""" )
config.update_from_string(model_args.config_overrides )
logger.info(F"""New config: {config}""" )
A: Tuple = {
'''cache_dir''': model_args.cache_dir,
'''use_fast''': model_args.use_fast_tokenizer,
'''revision''': model_args.model_revision,
'''use_auth_token''': True if model_args.use_auth_token else None,
}
if model_args.tokenizer_name:
A: Optional[int] = AutoTokenizer.from_pretrained(model_args.tokenizer_name , **__lowercase )
elif model_args.model_name_or_path:
A: Union[str, Any] = AutoTokenizer.from_pretrained(model_args.model_name_or_path , **__lowercase )
else:
raise ValueError(
'''You are instantiating a new tokenizer from scratch. This is not supported by this script.'''
'''You can do it from another script, save it, and load it from here, using --tokenizer_name.''' )
if model_args.model_name_or_path:
A: List[Any] = AutoModelForMaskedLM.from_pretrained(
model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=__lowercase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
else:
logger.info('''Training new model from scratch''' )
A: List[Any] = AutoModelForMaskedLM.from_config(__lowercase )
model.resize_token_embeddings(len(__lowercase ) )
# Preprocessing the datasets.
# First we tokenize all the texts.
if training_args.do_train:
A: int = datasets['''train'''].column_names
else:
A: str = datasets['''validation'''].column_names
A: Tuple = '''text''' if '''text''' in column_names else column_names[0]
A: List[str] = '''max_length''' if data_args.pad_to_max_length else False
def tokenize_function(__lowercase ):
# Remove empty lines
A: int = [line for line in examples['''text'''] if len(__lowercase ) > 0 and not line.isspace()]
return tokenizer(examples['''text'''] , padding=__lowercase , truncation=__lowercase , max_length=data_args.max_seq_length )
A: str = datasets.map(
__lowercase , batched=__lowercase , num_proc=data_args.preprocessing_num_workers , remove_columns=[text_column_name] , load_from_cache_file=not data_args.overwrite_cache , )
# Add the chinese references if provided
if data_args.train_ref_file is not None:
A: List[str] = add_chinese_references(tokenized_datasets['''train'''] , data_args.train_ref_file )
if data_args.validation_ref_file is not None:
A: Dict = add_chinese_references(
tokenized_datasets['''validation'''] , data_args.validation_ref_file )
# If we have ref files, need to avoid it removed by trainer
A: Optional[Any] = data_args.train_ref_file or data_args.validation_ref_file
if has_ref:
A: List[Any] = False
# Data collator
# This one will take care of randomly masking the tokens.
A: Optional[Any] = DataCollatorForWholeWordMask(tokenizer=__lowercase , mlm_probability=data_args.mlm_probability )
# Initialize our Trainer
A: Optional[int] = Trainer(
model=__lowercase , args=__lowercase , train_dataset=tokenized_datasets['''train'''] if training_args.do_train else None , eval_dataset=tokenized_datasets['''validation'''] if training_args.do_eval else None , tokenizer=__lowercase , data_collator=__lowercase , )
# Training
    if training_args.do_train:
        if last_checkpoint is not None:
            checkpoint = last_checkpoint
        elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path):
            checkpoint = model_args.model_name_or_path
        else:
            checkpoint = None
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()  # Saves the tokenizer too for easy upload
        output_train_file = os.path.join(training_args.output_dir, "train_results.txt")
        if trainer.is_world_process_zero():
            with open(output_train_file, "w") as writer:
                logger.info("***** Train results *****")
                for key, value in sorted(train_result.metrics.items()):
                    logger.info(f"  {key} = {value}")
                    writer.write(f"{key} = {value}\n")
            # Need to save the state, since Trainer.save_model saves only the tokenizer with the model
            trainer.state.save_to_json(os.path.join(training_args.output_dir, "trainer_state.json"))
# Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        eval_output = trainer.evaluate()
        perplexity = math.exp(eval_output["eval_loss"])
        results["perplexity"] = perplexity
        output_eval_file = os.path.join(training_args.output_dir, "eval_results_mlm_wwm.txt")
        if trainer.is_world_process_zero():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in sorted(results.items()):
                    logger.info(f"  {key} = {value}")
                    writer.write(f"{key} = {value}\n")
    return results
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
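# Example launch (our addition, not part of the original script). The flag names
# follow the argument classes this script parses; the script and data file names
# below are assumptions:
#   python run_mlm_wwm.py \
#       --model_name_or_path bert-base-chinese \
#       --train_file train.txt \
#       --train_ref_file train_ref.txt \
#       --do_train \
#       --output_dir ./mlm-wwm-out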
| 319 | 0 |
"""simple docstring"""
import unittest
from transformers import BertGenerationTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SPIECE_UNDERLINE = "▁"
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
class BertGenerationTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BertGenerationTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    def setUp(self):
        super().setUp()
        tokenizer = BertGenerationTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        token = "<s>"
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "<pad>")
        self.assertEqual(len(vocab_keys), 1002)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1000)
    def test_full_tokenizer(self):
        tokenizer = BertGenerationTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [285, 46, 10, 170, 382],
        )
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4],
        )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
    @cached_property
    def big_tokenizer(self):
        return BertGenerationTokenizer.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [18536, 2260, 101]
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
            " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
        )
        original_tokenizer_encodings = [
871,
419,
358,
946,
991,
2521,
452,
358,
1357,
387,
7751,
3536,
112,
985,
456,
126,
865,
938,
5400,
5734,
458,
1368,
467,
786,
2462,
5246,
1159,
633,
865,
4519,
457,
582,
852,
2557,
427,
916,
508,
405,
3_4324,
497,
391,
408,
1_1342,
1244,
385,
100,
938,
985,
456,
574,
362,
1_2597,
3200,
3129,
1172,
]
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @require_torch
    @slow
    def test_torch_encode_plus_sent_to_model(self):
        import torch
        from transformers import BertGenerationConfig, BertGenerationEncoder
        # Build sequence
        first_ten_tokens = list(self.big_tokenizer.get_vocab().keys())[:10]
        sequence = " ".join(first_ten_tokens)
        encoded_sequence = self.big_tokenizer.encode_plus(sequence, return_tensors="pt", return_token_type_ids=False)
        batch_encoded_sequence = self.big_tokenizer.batch_encode_plus(
            [sequence + " " + sequence], return_tensors="pt", return_token_type_ids=False
        )
        config = BertGenerationConfig()
        model = BertGenerationEncoder(config)
        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
        with torch.no_grad():
            model(**encoded_sequence)
            model(**batch_encoded_sequence)
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
lowerCAmelCase_ :Tuple = {"""input_ids""": [[3_9286, 458, 3_6335, 2001, 456, 1_3073, 1_3266, 455, 113, 7746, 1741, 1_1157, 391, 1_3073, 1_3266, 455, 113, 3967, 3_5412, 113, 4936, 109, 3870, 2377, 113, 3_0084, 4_5720, 458, 134, 1_7496, 112, 503, 1_1672, 113, 118, 112, 5665, 1_3347, 3_8687, 112, 1496, 3_1389, 112, 3268, 4_7264, 134, 962, 112, 1_6377, 8035, 2_3130, 430, 1_2169, 1_5518, 2_8592, 458, 146, 4_1697, 109, 391, 1_2169, 1_5518, 1_6689, 458, 146, 4_1358, 109, 452, 726, 4034, 111, 763, 3_5412, 5082, 388, 1903, 111, 9051, 391, 2870, 4_8918, 1900, 1123, 550, 998, 112, 9586, 1_5985, 455, 391, 410, 2_2955, 3_7636, 114], [448, 1_7496, 419, 3663, 385, 763, 113, 2_7533, 2870, 3283, 1_3043, 1639, 2_4713, 523, 656, 2_4013, 1_8550, 2521, 517, 2_7014, 2_1244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 1_1786, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [484, 2169, 7687, 2_1932, 1_8146, 726, 363, 1_7032, 3391, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__A , model_name="""google/bert_for_seq_generation_L-24_bbc_encoder""" , revision="""c817d1fd1be2ffa69431227a1fe320544943d4db""" , )
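# Standalone round-trip sketch (our addition, not part of the test suite), guarded so
# importing the module stays side-effect free. The expected ids come from the easy-
# symbols slow test above; fetching the checkpoint requires network access.
if __name__ == "__main__":
    tok = BertGenerationTokenizer.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
    ids = tok.encode("Hello World!")
    assert ids == [18536, 2260, 101]
    print(tok.decode(ids))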
| 84 |
'''simple docstring'''
import json
import os
import unittest
from typing import Tuple
from transformers import Wav2Vec2PhonemeCTCTokenizer
from transformers.models.wav2vec2.tokenization_wav2vec2 import VOCAB_FILES_NAMES
from transformers.models.wav2vec2_phoneme.tokenization_wav2vec2_phoneme import Wav2Vec2PhonemeCTCTokenizerOutput
from transformers.testing_utils import require_phonemizer
from ...test_tokenization_common import TokenizerTesterMixin
@require_phonemizer
class Wav2Vec2PhonemeCTCTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = Wav2Vec2PhonemeCTCTokenizer
    test_rust_tokenizer = False
    def setUp(self):
        super().setUp()
        vocab = (
'''<s> <pad> </s> <unk> n s t ə l a i k d m ɛ ɾ e ɪ p o ɐ z ð f j v b ɹ ʁ ʊ iː r w ʌ u ɡ æ aɪ ʃ h ɔ ɑː '''
'''ŋ ɚ eɪ β uː y ɑ̃ oʊ ᵻ eː θ aʊ ts oː ɔ̃ ɣ ɜ ɑ dʒ əl x ɜː ç ʒ tʃ ɔː ɑːɹ ɛ̃ ʎ ɔːɹ ʋ aː ɕ œ ø oːɹ ɲ yː '''
'''ʔ iə i5 s. tɕ ?? nʲ ɛː œ̃ ɭ ɔø ʑ tʲ ɨ ɛɹ ts. rʲ ɪɹ ɭʲ i.5 ɔɪ q sʲ u5 ʊɹ iɜ a5 iɛ5 øː ʕ ja əɜ th ɑ5 '''
'''oɪ dʲ ə5 tɕh ts.h mʲ ɯ dʑ vʲ e̞ tʃʲ ei5 o5 onɡ5 ɑu5 iɑ5 ai5 aɪɚ kh ə1 ʐ i2 ʉ ħ t[ aɪə ʲ ju ə2 u2 oɜ '''
'''pː iɛɜ ou5 y5 uɜ tː uo5 d[ uoɜ tsh ɑɜ ɵ i̪5 uei5 ɟ aɜ ɑɨ i.ɜ eʊ o2 ɐ̃ ä pʲ kʲ n̩ ɒ ph ɑu2 uɨ əɪ ɫ ɬ '''
'''yɜ bʲ ɑ2 s̪ aiɜ χ ɐ̃ʊ̃ 1 ə4 yæɜ a2 ɨː t̪ iouɜ ũ onɡɜ aɨ iɛ2 ɔɨ ɑuɜ o̞ ei2 iou2 c kː y2 ɖ oe dˤ yɛɜ '''
'''əʊ S ɡʲ onɡ2 u" eiɜ ʈ ɯᵝ iou5 dZ r̝̊ i.2 tS s^ ʝ yə5 iɑɜ uə5 pf ɨu iɑ2 ou2 ər2 fʲ ai2 r̝ uəɜ ɳ əɨ '''
'''ua5 uɪ ɽ bː yu5 uo2 yɛ5 l̩ ɻ ərɜ ʂ i̪2 ouɜ uaɜ a. a.ː yæ5 dː r̩ ee ɪu ər5 i̪ ɜ æi u: i.ː t^ o1 ɪ^ '''
'''ai ueiɜ æː ɛɪ eə i. ɴ ie ua2 ɑ1 o4 tʃː o: ɑ: u1 N i̪1 au yæ2 u. qː yəɜ y: kʰ tʃʰ iʊ sx õ uo tʰ '''
'''uai5 bʰ u.ː uə2 ʊə d^ s̪ː yiɜ dʰ r. oe: i1 ɟː yu2 nʲʲ i̪4 uei2 tsʲ ɸ ĩ ɑ4 t̪ː eɑ u4 e: tsː ʈʰ ɡʰ '''
'''ɯɯ dʒʲ ʂʲ X ɵː uaiɜ tɕʲ ã t^ː ẽː yɛ2 cː i.1 ɛʊ dˤdˤ dʒː i4 ɡː yi ɕʲ ɟʰ pʰ dʑʲ yuɜ ua1 ua4 æiː ɐɐ '''
'''ui iou1 ʊː a1 iou4 cʰ iɛ1 yə2 ɖʰ ẽ ʒʲ ää ər4 iːː ɪː iɑ1 ər1 œː øi ɪuː cʰcʰ əː1 iː1 ũ kʰː o̞o̞ xʲ '''
'''ou1 iɛ4 e̞e̞ y1 dzː dʲʲ dʰː ɯᵝɯᵝ lː uo1 i.4 i: yɛ5ʲ a4'''
).split(''' ''' )
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        self.special_tokens_map = {"pad_token": "<pad>", "unk_token": "<unk>", "bos_token": "<s>", "eos_token": "</s>"}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
    # overwrite since phonemes require specific creation
    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5) -> Tuple[str, list]:
        toks = [(i, tokenizer.decode([i], clean_up_tokenization_spaces=False)) for i in range(len(tokenizer))]
        toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], do_phonemize=False), toks))
        if max_length is not None and len(toks) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks) < min_length and len(toks) > 0:
            while len(toks) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]
        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
        if " " not in output_txt and len(toks_ids) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False)
                + " "
                + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False)
            )
        if with_prefix_space:
            output_txt = " " + output_txt
        output_ids = tokenizer.encode(output_txt, add_special_tokens=False)
        return output_txt, output_ids
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return Wav2Vec2PhonemeCTCTokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def test_tokenizer_add_new_tokens(self):
        tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")
        # check adding a single token
        tokenizer.add_tokens("xxx")
        token_ids = tokenizer("m xxx ɪ", do_phonemize=False).input_ids
        self.assertEqual(token_ids, [13, 392, 17])  # xxx should be last token
        tokenizer.add_tokens(["aaa", "bbb", "ccc"])
        token_ids = tokenizer("m aaa ɪ ccc", do_phonemize=False).input_ids
        self.assertEqual(token_ids, [13, 393, 17, 395])  # aaa and ccc should be after xxx and 2 after aaa
        token_ids = tokenizer("maɪ c", do_phonemize=False).input_ids
        self.assertEqual(token_ids, [3, 200])  # mai should be <unk> (=3)
    def test_phonemize(self):
        tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")
        input_text = "Hello how are you"
        phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us")
        self.assertEqual(phonemes, "h ə l oʊ h aʊ ɑːɹ j uː")
def _snake_case ( self : Tuple ) -> Dict:
'''simple docstring'''
A: str = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' )
A: List[Any] = '''Hello how are you'''
A: Any = tokenizer.phonemize(SCREAMING_SNAKE_CASE_ , phonemizer_lang='''en-us''' )
self.assertEqual(tokenizer(SCREAMING_SNAKE_CASE_ ).input_ids , tokenizer(SCREAMING_SNAKE_CASE_ , do_phonemize=SCREAMING_SNAKE_CASE_ ).input_ids )
def _snake_case ( self : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
A: str = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' )
A: List[str] = '''Hello how are you'''
A: Union[str, Any] = tokenizer.phonemize(SCREAMING_SNAKE_CASE_ , phonemizer_lang='''en-us''' )
A: Union[str, Any] = tokenizer.decode(tokenizer(SCREAMING_SNAKE_CASE_ ).input_ids )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
    def test_decode(self):
        tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")
        sample_ids = [
            [11, 5, 15, tokenizer.pad_token_id, 15, 8, 98],
            [24, 22, 5, 24, 22, 5, 77],
        ]
        tokens = tokenizer.decode(sample_ids[0])
        batch_tokens = tokenizer.batch_decode(sample_ids)
        self.assertEqual(tokens, batch_tokens[0])
        self.assertEqual(batch_tokens, ["k s ɾ ɾ l ɭʲ", "j ð s j ð s oːɹ"])
def _snake_case ( self : Any ) -> Optional[int]:
'''simple docstring'''
A: int = self.tokenizer_class.from_pretrained(
'''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token='''|''' )
tokenizer.add_tokens('''|''' )
A: List[Any] = '''Hello how are you'''
A: Optional[Any] = tokenizer.phonemize(SCREAMING_SNAKE_CASE_ , phonemizer_lang='''en-us''' )
self.assertEqual(SCREAMING_SNAKE_CASE_ , '''h ə l oʊ | h aʊ | ɑːɹ | j uː |''' )
def _snake_case ( self : List[str] ) -> int:
'''simple docstring'''
A: Optional[Any] = self.tokenizer_class.from_pretrained(
'''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token='''|''' )
tokenizer.add_tokens('''|''' )
A: Optional[Any] = '''Hello how are you'''
A: Any = tokenizer.phonemize(SCREAMING_SNAKE_CASE_ , phonemizer_lang='''en-us''' )
self.assertEqual(tokenizer(SCREAMING_SNAKE_CASE_ ).input_ids , tokenizer(SCREAMING_SNAKE_CASE_ , do_phonemize=SCREAMING_SNAKE_CASE_ ).input_ids )
def _snake_case ( self : Dict ) -> Any:
'''simple docstring'''
A: Optional[int] = self.tokenizer_class.from_pretrained(
'''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token='''|''' )
tokenizer.add_tokens('''|''' )
# fmt: off
A: str = [
[11, 5, 15, tokenizer.pad_token_id, tokenizer.word_delimiter_token_id, 15, 8, tokenizer.word_delimiter_token_id, 98],
[tokenizer.word_delimiter_token_id, 24, 22, tokenizer.word_delimiter_token_id, 5, 24, 22, 5, 77],
]
# fmt: on
# decode with word_del_token filter
A: Tuple = tokenizer.decode(sample_ids[0] )
A: Optional[Any] = tokenizer.batch_decode(SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , batch_tokens[0] )
self.assertEqual(SCREAMING_SNAKE_CASE_ , ['''k s ɾ ɾ l ɭʲ''', '''j ð s j ð s oːɹ'''] )
# decode with no word_del_token filter
A: str = tokenizer.decode(sample_ids[0] , filter_word_delimiter_token=SCREAMING_SNAKE_CASE_ )
A: List[Any] = tokenizer.batch_decode(SCREAMING_SNAKE_CASE_ , filter_word_delimiter_token=SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , batch_tokens[0] )
self.assertEqual(SCREAMING_SNAKE_CASE_ , ['''k s ɾ | ɾ l | ɭʲ''', '''| j ð | s j ð s oːɹ'''] )
def _snake_case ( self : int ) -> List[str]:
'''simple docstring'''
A: Dict = self.tokenizer_class.from_pretrained(
'''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token='''|''' )
tokenizer.add_tokens('''|''' )
A: Union[str, Any] = '''Hello how are you'''
A: Tuple = tokenizer.phonemize(SCREAMING_SNAKE_CASE_ , phonemizer_lang='''en-us''' )
A: Any = tokenizer.decode(tokenizer(SCREAMING_SNAKE_CASE_ ).input_ids , filter_word_delimiter_token=SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def _snake_case ( self : List[str] ) -> Any:
'''simple docstring'''
A: Dict = self.tokenizer_class.from_pretrained(
'''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token='''|''' )
tokenizer.add_tokens('''|''' )
A: Any = '''Hello how are you'''
A: List[Any] = tokenizer.phonemize(SCREAMING_SNAKE_CASE_ , phonemizer_lang='''en-us''' )
A: List[Any] = tokenizer.decode(tokenizer(SCREAMING_SNAKE_CASE_ ).input_ids , filter_word_delimiter_token=SCREAMING_SNAKE_CASE_ )
self.assertEqual(''' '''.join([p.strip() for p in phonemes.split(''' |''' )] ).strip() , SCREAMING_SNAKE_CASE_ )
def _snake_case ( self : List[str] ) -> Optional[Any]:
'''simple docstring'''
A: List[str] = self.tokenizer_class.from_pretrained(
'''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token=SCREAMING_SNAKE_CASE_ )
A: List[Any] = '''Hello how are you'''
A: List[str] = tokenizer(SCREAMING_SNAKE_CASE_ , phonemizer_lang='''en-us''' ).input_ids
A: Tuple = tokenizer(SCREAMING_SNAKE_CASE_ , phonemizer_lang='''fr-fr''' ).input_ids
self.assertNotEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
A: Tuple = tokenizer.decode(SCREAMING_SNAKE_CASE_ )
A: Any = tokenizer.decode(SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , '''h ə l oʊ h aʊ ɑːɹ j uː''' )
self.assertEqual(SCREAMING_SNAKE_CASE_ , '''ɛ l o h aʊ a ʁ j u''' )
def _snake_case ( self : str ) -> str:
'''simple docstring'''
A: str = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' )
A: str = '''Hello how Are you'''
A: Union[str, Any] = '''hello how are you'''
A: List[str] = tokenizer(SCREAMING_SNAKE_CASE_ ).input_ids
A: str = tokenizer(SCREAMING_SNAKE_CASE_ ).input_ids
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def _snake_case ( self : int ) -> List[Any]:
'''simple docstring'''
A: Union[str, Any] = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' )
tokenizer.add_tokens(['''!''', '''?'''] )
tokenizer.add_special_tokens({'''cls_token''': '''$$$'''} )
# fmt: off
A: Tuple = [
[11, 5, 15, tokenizer.pad_token_id, 15, 8, 98, 3_92, 3_92, 3_93, 3_92, 3_92, 3_93, 3_94, 3_94],
[24, 22, 5, 24, 22, 5, 77, tokenizer.pad_token_id, 3_94, 3_94],
]
# fmt: on
A: List[Any] = tokenizer.batch_decode(SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , ['''k s ɾ ɾ l ɭʲ!?!? $$$''', '''j ð s j ð s oːɹ $$$'''] )
    @staticmethod
    def get_from_offsets(offsets, key):
        retrieved_list = [d[key] for d in offsets]
        return retrieved_list
    def test_offsets(self):
        tokenizer = self.get_tokenizer(word_delimiter_token="|")
        tokenizer.add_tokens("|")
        # fmt: off
        # ksssɾɾ|ɾɾ<pad>ɾɾ|<pad>ɾlll|ɭʲ -> k s ɾ ɾ | ɾ l | ɭʲ"
        sample_ids = [11, 5, 5, 5, 15, 15, tokenizer.pad_token_id, 15, 15, tokenizer.word_delimiter_token_id, tokenizer.pad_token_id, 15, 8, 8, 8, tokenizer.word_delimiter_token_id, 98]
        # fmt: on
        outputs = tokenizer.decode(sample_ids, output_char_offsets=True, filter_word_delimiter_token=False)
        # check Wav2Vec2CTCTokenizerOutput keys for char
        self.assertEqual(len(outputs.keys()), 2)
        self.assertTrue("text" in outputs)
        self.assertTrue("char_offsets" in outputs)
        self.assertTrue(isinstance(outputs, Wav2Vec2PhonemeCTCTokenizerOutput))
# check that order of chars is correct and identical for both outputs
self.assertEqual(''' '''.join(self.get_from_offsets(outputs['''char_offsets'''] , '''char''' ) ) , outputs.text )
self.assertListEqual(
self.get_from_offsets(outputs['''char_offsets'''] , '''char''' ) , ['''k''', '''s''', '''ɾ''', '''ɾ''', '''|''', '''ɾ''', '''l''', '''|''', '''ɭʲ'''] )
# check that offsets are actually correct for char
# 0-1 is 11, 1-4 is 5, 4-6 is first 15, 6-7 is <pad> (thus not shown), 7-9 is second 15, 9-10 is word_delimiter_token,
# 10-11 is <pad> (thus not shown), 11-12 is third 15, 12-15 is 8, 15-16 is word_delimiter_token, 16-17 is 98
self.assertListEqual(
self.get_from_offsets(outputs['''char_offsets'''] , '''start_offset''' ) , [0, 1, 4, 7, 9, 11, 12, 15, 16] )
self.assertListEqual(
self.get_from_offsets(outputs['''char_offsets'''] , '''end_offset''' ) , [1, 4, 6, 9, 10, 12, 15, 16, 17] )
    def test_offsets_batch(self):
        tokenizer = self.get_tokenizer(word_delimiter_token="|")
        def check_list_tuples_equal(outputs_batch, outputs_list):
            self.assertTrue(isinstance(outputs_batch, Wav2Vec2PhonemeCTCTokenizerOutput))
            self.assertTrue(isinstance(outputs_list[0], Wav2Vec2PhonemeCTCTokenizerOutput))
            # transform list to ModelOutput
            outputs_batch_2 = Wav2Vec2PhonemeCTCTokenizerOutput(
                {k: [d[k] for d in outputs_list] for k in outputs_list[0]}
            )
            self.assertListEqual(outputs_batch["text"], outputs_batch_2["text"])
            def recursive_check(list_or_dict_1, list_or_dict_2):
                if isinstance(list_or_dict_1, list):
                    [recursive_check(l1, l2) for l1, l2 in zip(list_or_dict_1, list_or_dict_2)]
                self.assertEqual(list_or_dict_1, list_or_dict_2)
            if "char_offsets" in outputs_batch:
                recursive_check(outputs_batch["char_offsets"], outputs_batch_2["char_offsets"])
# fmt: off
        sample_ids = [
[11, 5, 15, tokenizer.pad_token_id, 15, 4, 8, 98, 32, 32, 32, 32, 4, 33, tokenizer.word_delimiter_token_id, 32, 32, 33, 34, 34],
[24, 22, 5, tokenizer.word_delimiter_token_id, tokenizer.word_delimiter_token_id, 24, 22, 22, 22, 4, 5, 77, tokenizer.pad_token_id, 22, 22, 4, 34, 34, 34, 34],
]
# fmt: on
# We assume that `decode` works as expected. All we will check now is
# the output type is correct and the output is identical to `decode`
# char
        outputs_char_batch = tokenizer.batch_decode(sample_ids, output_char_offsets=True)
        outputs_char = [tokenizer.decode(ids, output_char_offsets=True) for ids in sample_ids]
        check_list_tuples_equal(outputs_char_batch, outputs_char)
@unittest.skip('''Wav2Vec2PhonemeTokenizer always lower cases letters to correctly map to phonemes''' )
def _snake_case ( self : int ) -> int:
'''simple docstring'''
pass
@unittest.skip('''Wav2Vec2PhonemeTokenizer always puts spaces between phonemes''' )
def _snake_case ( self : str ) -> Any:
'''simple docstring'''
pass
@unittest.skip('''encodes to text to ids, but decodes ids to phonemes -> not possible to have internal consistency''' )
def _snake_case ( self : List[str] ) -> List[str]:
'''simple docstring'''
pass
@unittest.skip('''Wav2Vec2PhonemeModel has no max model length => no testing''' )
def _snake_case ( self : Dict ) -> List[Any]:
'''simple docstring'''
pass
def _snake_case ( self : Tuple ) -> Any:
'''simple docstring'''
A: Any = self.get_tokenizers(do_lower_case=SCREAMING_SNAKE_CASE_ )
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
A: str = tokenizer.vocab_size
A: str = len(SCREAMING_SNAKE_CASE_ )
self.assertNotEqual(SCREAMING_SNAKE_CASE_ , 0 )
# We usually have added tokens from the start in tests because our vocab fixtures are
# smaller than the original vocabs - let's not assert this
# self.assertEqual(vocab_size, all_size)
A: List[Any] = ['''aaaaa bbbbbb''', '''cccccccccdddddddd''']
A: List[Any] = tokenizer.add_tokens(SCREAMING_SNAKE_CASE_ )
A: Optional[Any] = tokenizer.vocab_size
A: Union[str, Any] = len(SCREAMING_SNAKE_CASE_ )
self.assertNotEqual(SCREAMING_SNAKE_CASE_ , 0 )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , len(SCREAMING_SNAKE_CASE_ ) )
self.assertEqual(SCREAMING_SNAKE_CASE_ , all_size + len(SCREAMING_SNAKE_CASE_ ) )
A: Any = tokenizer.encode('''aaaaa bbbbbb low cccccccccdddddddd l''' , add_special_tokens=SCREAMING_SNAKE_CASE_ )
self.assertGreaterEqual(len(SCREAMING_SNAKE_CASE_ ) , 4 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
A: str = {'''eos_token''': '''>>>>|||<||<<|<<''', '''pad_token''': '''<<<<<|||>|>>>>|>'''}
A: int = tokenizer.add_special_tokens(SCREAMING_SNAKE_CASE_ )
A: Optional[Any] = tokenizer.vocab_size
A: Optional[Any] = len(SCREAMING_SNAKE_CASE_ )
self.assertNotEqual(SCREAMING_SNAKE_CASE_ , 0 )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , len(SCREAMING_SNAKE_CASE_ ) )
self.assertEqual(SCREAMING_SNAKE_CASE_ , all_size_a + len(SCREAMING_SNAKE_CASE_ ) )
A: int = tokenizer.encode(
'''>>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l''' , add_special_tokens=SCREAMING_SNAKE_CASE_ )
self.assertGreaterEqual(len(SCREAMING_SNAKE_CASE_ ) , 6 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[0] , tokens[1] )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokens[-4] )
self.assertEqual(tokens[0] , tokenizer.eos_token_id )
self.assertEqual(tokens[-3] , tokenizer.pad_token_id )
@unittest.skip('''The tokenizer shouldn\'t be used to encode input IDs (except for labels), only to decode.''' )
def _snake_case ( self : List[Any] ) -> Optional[Any]:
'''simple docstring'''
pass
@unittest.skip('''The tokenizer shouldn\'t be used to encode input IDs (except for labels), only to decode.''' )
def _snake_case ( self : Tuple ) -> Optional[Any]:
'''simple docstring'''
pass
def _snake_case ( self : str ) -> Tuple:
'''simple docstring'''
A: List[Any] = self.get_tokenizers(fast=SCREAMING_SNAKE_CASE_ , do_lower_case=SCREAMING_SNAKE_CASE_ )
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
A: Union[str, Any] = ['''ð''', '''ɪ''', '''s''', '''ɪ''', '''z''', '''ɐ''', '''t''', '''ɛ''', '''k''', '''s''', '''t''']
A: Union[str, Any] = tokenizer.convert_tokens_to_string(SCREAMING_SNAKE_CASE_ )
self.assertIsInstance(output['''text'''] , SCREAMING_SNAKE_CASE_ )
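# Standalone sketch (our addition): the CTC-style grouping the tests above rely on —
# repeated ids collapse and pad tokens drop — shown on a tiny hand-built vocab, with
# phonemization disabled so no espeak backend is needed. Guarded to keep imports
# side-effect free; the exact constructor kwargs are our assumption.
if __name__ == "__main__":
    import tempfile
    with tempfile.TemporaryDirectory() as tmp:
        path = os.path.join(tmp, "vocab.json")
        with open(path, "w", encoding="utf-8") as f:
            json.dump({"<pad>": 0, "h": 1, "i": 2}, f)
        tok = Wav2Vec2PhonemeCTCTokenizer(path, do_phonemize=False)
        print(tok.decode([1, 1, 0, 2, 2]))  # expected: "h i"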
| 319 | 0 |
'''simple docstring'''
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class FocalNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        patch_size=2,
        num_channels=3,
        embed_dim=16,
        hidden_sizes=[32, 64, 128],
        depths=[1, 2, 1],
        num_heads=[2, 2, 4],
        window_size=2,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        patch_norm=True,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        is_training=True,
        scope=None,
        use_labels=True,
        type_sequence_label_size=10,
        encoder_stride=8,
        out_features=["stage1", "stage2"],
        out_indices=[1, 2],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
        self.out_features = out_features
        self.out_indices = out_indices
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
        return FocalNetConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , hidden_sizes=self.hidden_sizes , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
    def create_and_check_model(self, config, pixel_values, labels):
        model = FocalNetModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim))
    def create_and_check_backbone(self, config, pixel_values, labels):
        model = FocalNetBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size, 8, 8] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[:-1] )
# verify backbone works with out_features=None
        config.out_features = None
        model = FocalNetBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size * 2, 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = FocalNetForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
        # test greyscale images
        config.num_channels = 1
        model = FocalNetForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = FocalNetForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
        # test greyscale images
        config.num_channels = 1
        model = FocalNetForImageClassification(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class FocalNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FocalNetModel,
            FocalNetForImageClassification,
            FocalNetForMaskedImageModeling,
            FocalNetBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": FocalNetModel, "image-classification": FocalNetForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp(self):
        self.model_tester = FocalNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FocalNetConfig, embed_dim=37, has_text_modality=False)
    def test_config(self):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties(self):
        return
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)
    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)
    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
@unittest.skip(reason="FocalNet does not use inputs_embeds" )
def lowerCAmelCase__ ( self ) -> Optional[Any]:
'''simple docstring'''
pass
@unittest.skip(reason="FocalNet does not use feedforward chunking" )
def lowerCAmelCase__ ( self ) -> Optional[Any]:
'''simple docstring'''
pass
def lowerCAmelCase__ ( self ) -> Any:
'''simple docstring'''
snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
snake_case_ = model_class(a__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
snake_case_ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(a__ , nn.Linear ) )
def lowerCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
snake_case_ = model_class(a__ )
snake_case_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case_ = [*signature.parameters.keys()]
snake_case_ = ["pixel_values"]
self.assertListEqual(arg_names[:1] , a__ )
def lowerCAmelCase__ ( self , a__ , a__ , a__ , a__ ) -> str:
'''simple docstring'''
snake_case_ = model_class(a__ )
model.to(a__ )
model.eval()
with torch.no_grad():
snake_case_ = model(**self._prepare_for_class(a__ , a__ ) )
snake_case_ = outputs.hidden_states
snake_case_ = getattr(
self.model_tester , "expected_num_hidden_layers" , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(a__ ) , a__ )
# FocalNet has a different seq_length
snake_case_ = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
snake_case_ = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
snake_case_ = outputs.reshaped_hidden_states
self.assertEqual(len(a__ ) , a__ )
snake_case_ , snake_case_ , snake_case_ , snake_case_ = reshaped_hidden_states[0].shape
snake_case_ = (
reshaped_hidden_states[0].view(a__ , a__ , height * width ).permute(0 , 2 , 1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def lowerCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
snake_case_ = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes[:-1]:
snake_case_ = True
self.check_hidden_states_output(a__ , a__ , a__ , a__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
snake_case_ = True
self.check_hidden_states_output(a__ , a__ , a__ , a__ )
def lowerCAmelCase__ ( self ) -> int:
'''simple docstring'''
snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
snake_case_ = 3
snake_case_ = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
snake_case_ = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
snake_case_ = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
snake_case_ = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes[:-1]:
snake_case_ = True
self.check_hidden_states_output(a__ , a__ , a__ , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
snake_case_ = True
self.check_hidden_states_output(a__ , a__ , a__ , (padded_height, padded_width) )
@slow
def lowerCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case_ = FocalNetModel.from_pretrained(a__ )
self.assertIsNotNone(a__ )
def lowerCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
snake_case_ = _config_zero_init(a__ )
for model_class in self.all_model_classes:
snake_case_ = model_class(config=a__ )
for name, param in model.named_parameters():
if "embeddings" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F'Parameter {name} of model {model_class} seems not properly initialized' , )
@require_vision
@require_torch
class FocalNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("microsoft/focalnet-tiny") if is_vision_available() else None
    @slow
    def test_inference_image_classification_head(self):
        model = FocalNetForImageClassification.from_pretrained("microsoft/focalnet-tiny").to(torch_device)
        image_processor = self.default_image_processor
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([0.2166, -0.4368, 0.2191]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
        self.assertEqual(outputs.logits.argmax(dim=-1).item(), 281)
@require_torch
class FocalNetBackboneTest(BackboneTesterMixin, unittest.TestCase):
    all_model_classes = (FocalNetBackbone,) if is_torch_available() else ()
    config_class = FocalNetConfig
    has_attentions = False
    def setUp(self):
        self.model_tester = FocalNetModelTester(self)
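# Standalone sketch (our addition): the tiny-config forward pass that the tester
# above automates, written out by hand. The config kwargs mirror the tester
# defaults; expected shape: seq_len = (32/2)^2 / 4^2 = 16, dim = 16 * 2^2 = 64.
if __name__ == "__main__":
    tiny_config = FocalNetConfig(image_size=32, patch_size=2, num_channels=3, embed_dim=16, depths=[1, 2, 1])
    tiny_model = FocalNetModel(tiny_config).eval()
    with torch.no_grad():
        out = tiny_model(torch.randn(1, 3, 32, 32))
    print(out.last_hidden_state.shape)  # expected: torch.Size([1, 16, 64])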
| 85 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
UpperCamelCase = logging.get_logger(__name__)
class BeitFeatureExtractor(BeitImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use BeitImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
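# Quick check (our addition) that the shim keeps behaving like the new class while
# emitting the deprecation warning; guarded so importing stays side-effect free:
if __name__ == "__main__":
    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        extractor = BeitFeatureExtractor()
    assert any(issubclass(w.category, FutureWarning) for w in caught)
    assert isinstance(extractor, BeitImageProcessor)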
| 319 | 0 |
"""simple docstring"""
from collections import defaultdict
class AssignmentUsingBitmask:
    def __init__(self, task_performed, total):
        self.total_tasks = total  # total no of tasks (N)
        # DP table will have a dimension of (2^M)*N
        # initially all values are set to -1
        self.dp = [
            [-1 for i in range(total + 1)] for j in range(2 ** len(task_performed))
        ]
        self.task = defaultdict(list)  # stores the list of persons for each task
        # final_mask is used to check if all persons are included by setting all bits
        # to 1
        self.final_mask = (1 << len(task_performed)) - 1
    def count_ways_until(self, mask, task_no):
        # if mask == self.final_mask, all persons have been assigned a task, return 1
        if mask == self.final_mask:
            return 1
        # if not everyone gets a task and no more tasks are available, return 0
        if task_no > self.total_tasks:
            return 0
        # if this case was already considered
        if self.dp[mask][task_no] != -1:
            return self.dp[mask][task_no]
        # Number of ways when we don't use this task in the arrangement
        total_ways_util = self.count_ways_until(mask, task_no + 1)
# now assign the tasks one by one to all possible persons and recursively
# assign for the remaining tasks.
if task_no in self.task:
for p in self.task[task_no]:
# if p is already given a task
if mask & (1 << p):
continue
# assign this task to p and change the mask value. And recursively
# assign tasks with the new mask value.
total_ways_util += self.count_ways_until(mask | (1 << p) , task_no + 1 )
        # save the value.
        self.dp[mask][task_no] = total_ways_util
        return self.dp[mask][task_no]
    def count_no_of_ways(self, task_performed):
        # Store the list of persons for each task
        for i in range(len(task_performed)):
            for j in task_performed[i]:
                self.task[j].append(i)
        # call the function to fill the DP table; the final answer is stored in dp[0][1]
        return self.count_ways_until(0, 1)
if __name__ == "__main__":
    total_tasks = 5  # total no of tasks (the value of N)
    # the list of tasks that can be done by M persons.
    task_performed = [[1, 3, 4], [1, 2, 5], [3, 4]]
print(
AssignmentUsingBitmask(task_performed, total_tasks).count_no_of_ways(
task_performed
)
)
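# A minimal sanity check for the bitmask DP above (our addition, not part of the
# original script): brute-force over all person->task choices with itertools and
# compare counts. Assumes the class API restored above (count_no_of_ways).
from itertools import product

def brute_force_count(task_performed):
    count = 0
    # each person picks one of their allowed tasks; all picked tasks must be distinct
    for choice in product(*task_performed):
        if len(set(choice)) == len(task_performed):
            count += 1
    return count

if __name__ == "__main__":
    example = [[1, 3, 4], [1, 2, 5], [3, 4]]
    assert AssignmentUsingBitmask(example, 5).count_no_of_ways(example) == brute_force_count(example)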
| 86 |
'''simple docstring'''
import os
import pytest
from transformers.dynamic_module_utils import get_imports
TOP_LEVEL_IMPORT = '''
import os
'''
IMPORT_IN_FUNCTION = '''
def foo():
import os
return False
'''
DEEPLY_NESTED_IMPORT = '''
def foo():
def bar():
if True:
import os
return False
return bar()
'''
TOP_LEVEL_TRY_IMPORT = '''
import os
try:
import bar
except ImportError:
raise ValueError()
'''
TRY_IMPORT_IN_FUNCTION = '''
import os
def foo():
try:
import bar
except ImportError:
raise ValueError()
'''
MULTIPLE_EXCEPTS_IMPORT = '''
import os
try:
import bar
except (ImportError, AttributeError):
raise ValueError()
'''
EXCEPT_AS_IMPORT = '''
import os
try:
import bar
except ImportError as e:
raise ValueError()
'''
GENERIC_EXCEPT_IMPORT = '''
import os
try:
import bar
except:
raise ValueError()
'''
MULTILINE_TRY_IMPORT = '''
import os
try:
import bar
import baz
except ImportError:
raise ValueError()
'''
MULTILINE_BOTH_IMPORT = '''
import os
try:
import bar
import baz
except ImportError:
x = 1
raise ValueError()
'''
CASES = [
TOP_LEVEL_IMPORT,
IMPORT_IN_FUNCTION,
DEEPLY_NESTED_IMPORT,
TOP_LEVEL_TRY_IMPORT,
GENERIC_EXCEPT_IMPORT,
MULTILINE_TRY_IMPORT,
MULTILINE_BOTH_IMPORT,
MULTIPLE_EXCEPTS_IMPORT,
EXCEPT_AS_IMPORT,
TRY_IMPORT_IN_FUNCTION,
]
@pytest.mark.parametrize("case", CASES)
def test_import_parsing(tmp_path, case):
    tmp_file_path = os.path.join(tmp_path, "test_file.py")
    with open(tmp_file_path, "w") as _tmp_file:
        _tmp_file.write(case)
    parsed_imports = get_imports(tmp_file_path)
    assert parsed_imports == ["os"]
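# Note (our addition): get_imports collects the hard dependencies of a dynamically
# loaded module and, to the best of our understanding, strips try/except blocks
# before parsing, since imports guarded that way are treated as optional. That is
# why every fixture above is expected to resolve to just ["os"].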
| 319 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'''configuration_groupvit''': [
'''GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''GroupViTConfig''',
'''GroupViTOnnxConfig''',
'''GroupViTTextConfig''',
'''GroupViTVisionConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_groupvit"] = [
'''GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GroupViTModel''',
'''GroupViTPreTrainedModel''',
'''GroupViTTextModel''',
'''GroupViTVisionModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_groupvit"] = [
'''TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFGroupViTModel''',
'''TFGroupViTPreTrainedModel''',
'''TFGroupViTTextModel''',
'''TFGroupViTVisionModel''',
]
if TYPE_CHECKING:
from .configuration_groupvit import (
GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GroupViTConfig,
GroupViTOnnxConfig,
GroupViTTextConfig,
GroupViTVisionConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_groupvit import (
GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GroupViTModel,
GroupViTPreTrainedModel,
GroupViTTextModel,
GroupViTVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_groupvit import (
TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFGroupViTModel,
TFGroupViTPreTrainedModel,
TFGroupViTTextModel,
TFGroupViTVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
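# Note (our addition): with the _LazyModule registration above, importing this
# package is cheap — the heavy torch/TF submodules are only imported the first
# time an attribute such as GroupViTModel is actually accessed.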
| 87 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
BertTokenizer,
ViltConfig,
ViltForImageAndTextRetrieval,
ViltForImagesAndTextClassification,
ViltForMaskedLM,
ViltForQuestionAnswering,
ViltImageProcessor,
ViltProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(config, vqa_model=False, nlvr_model=False, irtr_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"""transformer.blocks.{i}.norm1.weight""", F"""vilt.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""transformer.blocks.{i}.norm1.bias""", F"""vilt.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append(
(F"""transformer.blocks.{i}.attn.proj.weight""", F"""vilt.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append(
(F"""transformer.blocks.{i}.attn.proj.bias""", F"""vilt.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((F"""transformer.blocks.{i}.norm2.weight""", F"""vilt.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""transformer.blocks.{i}.norm2.bias""", F"""vilt.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append(
(F"""transformer.blocks.{i}.mlp.fc1.weight""", F"""vilt.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((F"""transformer.blocks.{i}.mlp.fc1.bias""", F"""vilt.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((F"""transformer.blocks.{i}.mlp.fc2.weight""", F"""vilt.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""transformer.blocks.{i}.mlp.fc2.bias""", F"""vilt.encoder.layer.{i}.output.dense.bias""") )
# embeddings
rename_keys.extend(
[
# text embeddings
('''text_embeddings.word_embeddings.weight''', '''vilt.embeddings.text_embeddings.word_embeddings.weight'''),
(
'''text_embeddings.position_embeddings.weight''',
'''vilt.embeddings.text_embeddings.position_embeddings.weight''',
),
('''text_embeddings.position_ids''', '''vilt.embeddings.text_embeddings.position_ids'''),
(
'''text_embeddings.token_type_embeddings.weight''',
'''vilt.embeddings.text_embeddings.token_type_embeddings.weight''',
),
('''text_embeddings.LayerNorm.weight''', '''vilt.embeddings.text_embeddings.LayerNorm.weight'''),
('''text_embeddings.LayerNorm.bias''', '''vilt.embeddings.text_embeddings.LayerNorm.bias'''),
# patch embeddings
('''transformer.cls_token''', '''vilt.embeddings.cls_token'''),
('''transformer.patch_embed.proj.weight''', '''vilt.embeddings.patch_embeddings.projection.weight'''),
('''transformer.patch_embed.proj.bias''', '''vilt.embeddings.patch_embeddings.projection.bias'''),
('''transformer.pos_embed''', '''vilt.embeddings.position_embeddings'''),
# token type embeddings
('''token_type_embeddings.weight''', '''vilt.embeddings.token_type_embeddings.weight'''),
] )
# final layernorm + pooler
rename_keys.extend(
[
('''transformer.norm.weight''', '''vilt.layernorm.weight'''),
('''transformer.norm.bias''', '''vilt.layernorm.bias'''),
('''pooler.dense.weight''', '''vilt.pooler.dense.weight'''),
('''pooler.dense.bias''', '''vilt.pooler.dense.bias'''),
] )
# classifier head(s)
if vqa_model:
# classification head
rename_keys.extend(
[
('''vqa_classifier.0.weight''', '''classifier.0.weight'''),
('''vqa_classifier.0.bias''', '''classifier.0.bias'''),
('''vqa_classifier.1.weight''', '''classifier.1.weight'''),
('''vqa_classifier.1.bias''', '''classifier.1.bias'''),
('''vqa_classifier.3.weight''', '''classifier.3.weight'''),
('''vqa_classifier.3.bias''', '''classifier.3.bias'''),
] )
elif nlvr_model:
# classification head
rename_keys.extend(
[
('''nlvr2_classifier.0.weight''', '''classifier.0.weight'''),
('''nlvr2_classifier.0.bias''', '''classifier.0.bias'''),
('''nlvr2_classifier.1.weight''', '''classifier.1.weight'''),
('''nlvr2_classifier.1.bias''', '''classifier.1.bias'''),
('''nlvr2_classifier.3.weight''', '''classifier.3.weight'''),
('''nlvr2_classifier.3.bias''', '''classifier.3.bias'''),
] )
else:
pass
return rename_keys
def read_in_q_k_v(state_dict, config):
    for i in range(config.num_hidden_layers):
        prefix = "vilt."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"transformer.blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"transformer.blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[prefix + f"encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[prefix + f"encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[prefix + f"encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[prefix + f"encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[prefix + f"encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[prefix + f"encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
@torch.no_grad()
def convert_vilt_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    """
    Copy/paste/tweak the original checkpoint's weights to our ViLT structure.
    """
    config = ViltConfig(image_size=384, patch_size=32, tie_word_embeddings=False)
    mlm_model = False
    vqa_model = False
    nlvr_model = False
    irtr_model = False
    if "vqa" in checkpoint_url:
        vqa_model = True
        config.num_labels = 3129
        repo_id = "huggingface/label-files"
        filename = "vqa2-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        model = ViltForQuestionAnswering(config)
    elif "nlvr" in checkpoint_url:
        nlvr_model = True
        config.num_labels = 2
        config.id2label = {0: "False", 1: "True"}
        config.label2id = {v: k for k, v in config.id2label.items()}
        config.modality_type_vocab_size = 3
        model = ViltForImagesAndTextClassification(config)
    elif "irtr" in checkpoint_url:
        irtr_model = True
        model = ViltForImageAndTextRetrieval(config)
    elif "mlm_itm" in checkpoint_url:
        mlm_model = True
        model = ViltForMaskedLM(config)
    else:
        raise ValueError("Unknown model type")
# load state_dict of original model, remove and rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["state_dict"]
    rename_keys = create_rename_keys(config, vqa_model, nlvr_model, irtr_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config)
    if mlm_model or irtr_model:
        ignore_keys = ["itm_score.fc.weight", "itm_score.fc.bias"]
        for k in ignore_keys:
            state_dict.pop(k, None)
    # load state dict into HuggingFace model
    model.eval()
    if mlm_model:
        missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
        assert missing_keys == ["mlm_score.decoder.bias"]
    else:
        model.load_state_dict(state_dict)
# Define processor
A: Optional[Any] = ViltImageProcessor(size=3_8_4 )
A: Dict = BertTokenizer.from_pretrained('''bert-base-uncased''' )
A: Optional[int] = ViltProcessor(__lowercase , __lowercase )
# Forward pass on example inputs (image + text)
if nlvr_model:
        image_1 = Image.open(requests.get('''https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg''' , stream=True ).raw )
        image_2 = Image.open(requests.get('''https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg''' , stream=True ).raw )
        text = (
            '''The left image contains twice the number of dogs as the right image, and at least two dogs in total are'''
            ''' standing.'''
        )
        encoding_1 = processor(image_1 , text , return_tensors='''pt''' )
        encoding_2 = processor(image_2 , text , return_tensors='''pt''' )
        outputs = model(
            input_ids=encoding_1.input_ids , pixel_values=encoding_1.pixel_values , pixel_values_2=encoding_2.pixel_values , )
else:
        image = Image.open(requests.get('''http://images.cocodataset.org/val2017/000000039769.jpg''' , stream=True ).raw )
        if mlm_model:
            text = '''a bunch of [MASK] laying on a [MASK].'''
        else:
            text = '''How many cats are there?'''
        encoding = processor(image , text , return_tensors='''pt''' )
        outputs = model(**encoding )
# Verify outputs
    if mlm_model:
        expected_shape = torch.Size([1, 1_1, 3_0_5_2_2] )
        expected_slice = torch.tensor([-1_2.5_0_6_1, -1_2.5_1_2_3, -1_2.5_1_7_4] )
        assert outputs.logits.shape == expected_shape
        assert torch.allclose(outputs.logits[0, 0, :3] , expected_slice , atol=1E-4 )
        # verify masked token prediction equals "cats"
        predicted_id = outputs.logits[0, 4, :].argmax(-1 ).item()
        assert tokenizer.decode([predicted_id] ) == "cats"
    elif vqa_model:
        expected_shape = torch.Size([1, 3_1_2_9] )
        expected_slice = torch.tensor([-1_5.9_4_9_5, -1_8.1_4_7_2, -1_0.3_0_4_1] )
        assert outputs.logits.shape == expected_shape
        assert torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1E-4 )
        # verify vqa prediction equals "2"
        predicted_idx = outputs.logits.argmax(-1 ).item()
        assert model.config.id2label[predicted_idx] == "2"
    elif nlvr_model:
        expected_shape = torch.Size([1, 2] )
        expected_slice = torch.tensor([-2.8_7_2_1, 2.1_2_9_1] )
        assert outputs.logits.shape == expected_shape
        assert torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1E-4 )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(F"""Saving model and processor to {pytorch_dump_folder_path}""" )
    model.save_pretrained(pytorch_dump_folder_path )
    processor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt''',
type=str,
help='''URL of the checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
    args = parser.parse_args()
convert_vilt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
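# Example invocation (the script filename below is hypothetical; the flags match
# the argparse setup above, and the URL is the parser's default):
#
#     python convert_vilt_checkpoint.py \
#         --checkpoint_url https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt \
#         --pytorch_dump_folder_path ./vilt-mlm-itm-converted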
| 319 | 0 |
def greatest_common_divisor(x , y ):
    '''Return the greatest common divisor of x and y (Euclid's algorithm).'''
    return x if y == 0 else greatest_common_divisor(y , x % y )
def lcm(x , y ):
    '''Return the least common multiple of x and y.'''
    return (x * y) // greatest_common_divisor(x , y )
def solution(n = 20 ):
    '''Project Euler 5: smallest positive number evenly divisible by 1..n.'''
    g = 1
    for i in range(1 , n + 1 ):
        g = lcm(g , i )
    return g
if __name__ == "__main__":
    print(F'''{solution() = }''')
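# Sanity check against the standard library (Python 3.9+), assuming the functions
# above: math.lcm folded over 1..20 must agree with solution() == 232792560.
#
#     import math
#     from functools import reduce
#     assert reduce(math.lcm, range(1, 21), 1) == solution() == 232792560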
| 88 |
'''simple docstring'''
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
model_classes = {
    '''b0''': efficientnet.EfficientNetB0,
    '''b1''': efficientnet.EfficientNetB1,
    '''b2''': efficientnet.EfficientNetB2,
    '''b3''': efficientnet.EfficientNetB3,
    '''b4''': efficientnet.EfficientNetB4,
    '''b5''': efficientnet.EfficientNetB5,
    '''b6''': efficientnet.EfficientNetB6,
    '''b7''': efficientnet.EfficientNetB7,
}
CONFIG_MAP = {
'''b0''': {
'''hidden_dim''': 1280,
'''width_coef''': 1.0,
'''depth_coef''': 1.0,
'''image_size''': 224,
'''dropout_rate''': 0.2,
'''dw_padding''': [],
},
'''b1''': {
'''hidden_dim''': 1280,
'''width_coef''': 1.0,
'''depth_coef''': 1.1,
'''image_size''': 240,
'''dropout_rate''': 0.2,
'''dw_padding''': [16],
},
'''b2''': {
'''hidden_dim''': 1408,
'''width_coef''': 1.1,
'''depth_coef''': 1.2,
'''image_size''': 260,
'''dropout_rate''': 0.3,
'''dw_padding''': [5, 8, 16],
},
'''b3''': {
'''hidden_dim''': 1536,
'''width_coef''': 1.2,
'''depth_coef''': 1.4,
'''image_size''': 300,
'''dropout_rate''': 0.3,
'''dw_padding''': [5, 18],
},
'''b4''': {
'''hidden_dim''': 1792,
'''width_coef''': 1.4,
'''depth_coef''': 1.8,
'''image_size''': 380,
'''dropout_rate''': 0.4,
'''dw_padding''': [6],
},
'''b5''': {
'''hidden_dim''': 2048,
'''width_coef''': 1.6,
'''depth_coef''': 2.2,
'''image_size''': 456,
'''dropout_rate''': 0.4,
'''dw_padding''': [13, 27],
},
'''b6''': {
'''hidden_dim''': 2304,
'''width_coef''': 1.8,
'''depth_coef''': 2.6,
'''image_size''': 528,
'''dropout_rate''': 0.5,
'''dw_padding''': [31],
},
'''b7''': {
'''hidden_dim''': 2560,
'''width_coef''': 2.0,
'''depth_coef''': 3.1,
'''image_size''': 600,
'''dropout_rate''': 0.5,
'''dw_padding''': [18],
},
}
def get_efficientnet_config(model_name ):
    config = EfficientNetConfig()
    config.hidden_dim = CONFIG_MAP[model_name]['''hidden_dim''']
    config.width_coefficient = CONFIG_MAP[model_name]['''width_coef''']
    config.depth_coefficient = CONFIG_MAP[model_name]['''depth_coef''']
    config.image_size = CONFIG_MAP[model_name]['''image_size''']
    config.dropout_rate = CONFIG_MAP[model_name]['''dropout_rate''']
    config.depthwise_padding = CONFIG_MAP[model_name]['''dw_padding''']
    repo_id = '''huggingface/label-files'''
    filename = '''imagenet-1k-id2label.json'''
    config.num_labels = 1_0_0_0
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type='''dataset''' ) , '''r''' ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def prepare_img():
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
def convert_image_processor(model_name ):
    size = CONFIG_MAP[model_name]['''image_size''']
    preprocessor = EfficientNetImageProcessor(
        size={'''height''': size, '''width''': size} , image_mean=[0.4_8_5, 0.4_5_6, 0.4_0_6] , image_std=[0.4_7_8_5_3_9_4_4, 0.4_7_3_2_8_6_4, 0.4_7_4_3_4_1_6_3] , do_center_crop=False , )
    return preprocessor
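# Hedged usage sketch of the processor above on a synthetic image (the all-zero
# image is purely illustrative; no download needed):
#
#     import numpy as np
#     from PIL import Image
#     preprocessor = convert_image_processor("b0")
#     dummy = Image.fromarray(np.zeros((600, 400, 3), dtype=np.uint8))
#     batch = preprocessor(images=dummy, return_tensors="pt")
#     print(batch["pixel_values"].shape)  # should be torch.Size([1, 3, 224, 224]) for b0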
def rename_keys(original_param_names ):
    block_names = [v.split('''_''' )[0].split('''block''' )[1] for v in original_param_names if v.startswith('''block''' )]
    block_names = sorted(set(block_names ) )
    num_blocks = len(block_names )
    block_name_mapping = {b: str(i ) for b, i in zip(block_names , range(num_blocks ) )}
    rename_keys = []
rename_keys.append(('''stem_conv/kernel:0''', '''embeddings.convolution.weight''') )
rename_keys.append(('''stem_bn/gamma:0''', '''embeddings.batchnorm.weight''') )
rename_keys.append(('''stem_bn/beta:0''', '''embeddings.batchnorm.bias''') )
rename_keys.append(('''stem_bn/moving_mean:0''', '''embeddings.batchnorm.running_mean''') )
rename_keys.append(('''stem_bn/moving_variance:0''', '''embeddings.batchnorm.running_var''') )
for b in block_names:
A: int = block_name_mapping[b]
rename_keys.append((F"""block{b}_expand_conv/kernel:0""", F"""encoder.blocks.{hf_b}.expansion.expand_conv.weight""") )
rename_keys.append((F"""block{b}_expand_bn/gamma:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.weight""") )
rename_keys.append((F"""block{b}_expand_bn/beta:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.bias""") )
rename_keys.append(
(F"""block{b}_expand_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.running_mean""") )
rename_keys.append(
(F"""block{b}_expand_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.running_var""") )
rename_keys.append(
(F"""block{b}_dwconv/depthwise_kernel:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight""") )
rename_keys.append((F"""block{b}_bn/gamma:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight""") )
rename_keys.append((F"""block{b}_bn/beta:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias""") )
rename_keys.append(
(F"""block{b}_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean""") )
rename_keys.append(
(F"""block{b}_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var""") )
rename_keys.append((F"""block{b}_se_reduce/kernel:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.reduce.weight""") )
rename_keys.append((F"""block{b}_se_reduce/bias:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.reduce.bias""") )
rename_keys.append((F"""block{b}_se_expand/kernel:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.expand.weight""") )
rename_keys.append((F"""block{b}_se_expand/bias:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.expand.bias""") )
rename_keys.append(
(F"""block{b}_project_conv/kernel:0""", F"""encoder.blocks.{hf_b}.projection.project_conv.weight""") )
rename_keys.append((F"""block{b}_project_bn/gamma:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.weight""") )
rename_keys.append((F"""block{b}_project_bn/beta:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.bias""") )
rename_keys.append(
(F"""block{b}_project_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.running_mean""") )
rename_keys.append(
(F"""block{b}_project_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.running_var""") )
rename_keys.append(('''top_conv/kernel:0''', '''encoder.top_conv.weight''') )
rename_keys.append(('''top_bn/gamma:0''', '''encoder.top_bn.weight''') )
rename_keys.append(('''top_bn/beta:0''', '''encoder.top_bn.bias''') )
rename_keys.append(('''top_bn/moving_mean:0''', '''encoder.top_bn.running_mean''') )
rename_keys.append(('''top_bn/moving_variance:0''', '''encoder.top_bn.running_var''') )
    key_mapping = {}
    for item in rename_keys:
        if item[0] in original_param_names:
            key_mapping[item[0]] = '''efficientnet.''' + item[1]
    key_mapping['''predictions/kernel:0'''] = '''classifier.weight'''
    key_mapping['''predictions/bias:0'''] = '''classifier.bias'''
return key_mapping
def replace_params(hf_params , tf_params , key_mapping ):
    for key, value in tf_params.items():
        if "normalization" in key:
            continue
        hf_key = key_mapping[key]
        if "_conv" in key and "kernel" in key:
            new_hf_value = torch.from_numpy(value ).permute(3 , 2 , 0 , 1 )
        elif "depthwise_kernel" in key:
            new_hf_value = torch.from_numpy(value ).permute(2 , 3 , 0 , 1 )
        elif "kernel" in key:
            new_hf_value = torch.from_numpy(np.transpose(value ) )
        else:
            new_hf_value = torch.from_numpy(value )
        # Replace HF parameters with original TF model parameters
        assert hf_params[hf_key].shape == new_hf_value.shape
        hf_params[hf_key].copy_(new_hf_value )
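# The permutes above translate TF kernel layouts into PyTorch ones, e.g. for a
# regular conv kernel (H, W, C_in, C_out) -> (C_out, C_in, H, W):
#
#     tf_kernel = torch.randn(3, 3, 16, 32)      # TF layout: H, W, C_in, C_out
#     pt_kernel = tf_kernel.permute(3, 2, 0, 1)  # PyTorch layout: C_out, C_in, H, W
#     assert pt_kernel.shape == (32, 16, 3, 3)
#
# Depthwise kernels (H, W, C, multiplier) use permute(2, 3, 0, 1) instead.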
@torch.no_grad()
def convert_efficientnet_checkpoint(model_name , pytorch_dump_folder_path , save_model , push_to_hub ):
    original_model = model_classes[model_name](
        include_top=True , weights='''imagenet''' , input_tensor=None , input_shape=None , pooling=None , classes=1_0_0_0 , classifier_activation='''softmax''' , )
    tf_params = original_model.trainable_variables
    tf_non_train_params = original_model.non_trainable_variables
    tf_params = {param.name: param.numpy() for param in tf_params}
    for param in tf_non_train_params:
        tf_params[param.name] = param.numpy()
    tf_param_names = list(tf_params.keys() )
    # Load HuggingFace model
    config = get_efficientnet_config(model_name )
    hf_model = EfficientNetForImageClassification(config ).eval()
    hf_params = hf_model.state_dict()
    # Create src-to-dst parameter name mapping dictionary
    print('''Converting parameters...''' )
    key_mapping = rename_keys(tf_param_names )
    replace_params(hf_params , tf_params , key_mapping )
    # Initialize preprocessor and preprocess input image
    preprocessor = convert_image_processor(model_name )
    inputs = preprocessor(images=prepare_img() , return_tensors='''pt''' )
    # HF model inference
    hf_model.eval()
    with torch.no_grad():
        outputs = hf_model(**inputs )
    hf_logits = outputs.logits.detach().numpy()
    # Original model inference
    original_model.trainable = False
    image_size = CONFIG_MAP[model_name]['''image_size''']
    img = prepare_img().resize((image_size, image_size) , resample=PIL.Image.NEAREST )
    x = image.img_to_array(img )
    x = np.expand_dims(x , axis=0 )
    original_logits = original_model.predict(x )
    # Check whether original and HF model outputs match -> np.allclose
    assert np.allclose(original_logits , hf_logits , atol=1E-3 ), "The predicted logits are not the same."
    print('''Model outputs match!''' )
    if save_model:
        # Create folder to save model
        if not os.path.isdir(pytorch_dump_folder_path ):
            os.mkdir(pytorch_dump_folder_path )
        # Save converted model and image processor
        hf_model.save_pretrained(pytorch_dump_folder_path )
        preprocessor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        # Push model and image processor to hub
        print(F"""Pushing converted {model_name} to the hub...""" )
        model_name = F"""efficientnet-{model_name}"""
        preprocessor.push_to_hub(model_name )
        hf_model.push_to_hub(model_name )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''b0''',
type=str,
help='''Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''hf_model''',
type=str,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument('''--save_model''', action='''store_true''', help='''Save model to local''')
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Push model and image processor to the hub''')
    args = parser.parse_args()
convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
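# Example invocation (hypothetical script name; the flags match the argparse setup above):
#
#     python convert_efficientnet_checkpoint.py --model_name b0 --pytorch_dump_folder_path hf_model --save_model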
| 319 | 0 |
'''simple docstring'''
import inspect
import tempfile
import unittest
from huggingface_hub import hf_hub_download
from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
__lowerCAmelCase = 1e-4
if is_torch_available():
import torch
from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder
@require_torch
class AutoformerModelTester:
def __init__( self : int ,_UpperCAmelCase : Any ,_UpperCAmelCase : List[Any]=16 ,_UpperCAmelCase : int=13 ,_UpperCAmelCase : Union[str, Any]=7 ,_UpperCAmelCase : List[Any]=14 ,_UpperCAmelCase : List[str]=10 ,_UpperCAmelCase : Any=19 ,_UpperCAmelCase : List[str]=5 ,_UpperCAmelCase : Tuple=4 ,_UpperCAmelCase : Optional[int]=True ,_UpperCAmelCase : List[str]=16 ,_UpperCAmelCase : List[Any]=2 ,_UpperCAmelCase : Dict=4 ,_UpperCAmelCase : Union[str, Any]=4 ,_UpperCAmelCase : List[Any]="gelu" ,_UpperCAmelCase : Optional[int]=0.1 ,_UpperCAmelCase : Union[str, Any]=0.1 ,_UpperCAmelCase : int=[1, 2, 3, 4, 5] ,_UpperCAmelCase : Union[str, Any]=25 ,_UpperCAmelCase : Optional[int]=5 ,):
_a : int = d_model
_a : Optional[Any] = parent
_a : Optional[int] = batch_size
_a : str = prediction_length
_a : Any = context_length
_a : Dict = cardinality
_a : List[str] = num_time_features
_a : Tuple = lags_sequence
_a : Any = embedding_dimension
_a : Optional[int] = is_training
_a : str = hidden_size
_a : int = num_hidden_layers
_a : List[Any] = num_attention_heads
_a : Optional[Any] = intermediate_size
_a : Any = hidden_act
_a : str = hidden_dropout_prob
_a : List[Any] = attention_probs_dropout_prob
_a : Any = context_length
_a : Union[str, Any] = prediction_length + label_length
_a : Optional[int] = label_length
_a : Optional[int] = moving_average
_a : Union[str, Any] = autocorrelation_factor
def __lowercase ( self : List[Any] ):
return AutoformerConfig(
d_model=self.d_model ,encoder_layers=self.num_hidden_layers ,decoder_layers=self.num_hidden_layers ,encoder_attention_heads=self.num_attention_heads ,decoder_attention_heads=self.num_attention_heads ,encoder_ffn_dim=self.intermediate_size ,decoder_ffn_dim=self.intermediate_size ,dropout=self.hidden_dropout_prob ,attention_dropout=self.attention_probs_dropout_prob ,prediction_length=self.prediction_length ,context_length=self.context_length ,label_length=self.label_length ,lags_sequence=self.lags_sequence ,num_time_features=self.num_time_features ,num_static_categorical_features=1 ,cardinality=[self.cardinality] ,embedding_dimension=[self.embedding_dimension] ,moving_average=self.moving_average ,)
def __lowercase ( self : int ,_UpperCAmelCase : List[Any] ):
_a : str = config.context_length + max(config.lags_sequence )
_a : Dict = ids_tensor([self.batch_size, 1] ,config.cardinality[0] )
_a : Any = floats_tensor([self.batch_size, _past_length, config.num_time_features] )
_a : Optional[int] = floats_tensor([self.batch_size, _past_length] )
_a : str = floats_tensor([self.batch_size, _past_length] ) > 0.5
# decoder inputs
_a : str = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features] )
_a : List[str] = floats_tensor([self.batch_size, config.prediction_length] )
_a : List[str] = {
'past_values': past_values,
'static_categorical_features': static_categorical_features,
'past_time_features': past_time_features,
'past_observed_mask': past_observed_mask,
'future_time_features': future_time_features,
'future_values': future_values,
}
return inputs_dict
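    # Shape note for the dict above: past_values / past_time_features cover
    # _past_length = context_length + max(lags_sequence) steps, while the
    # future_* tensors cover prediction_length steps (see the floats_tensor calls).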
def __lowercase ( self : Any ):
_a : str = self.get_config()
_a : Optional[Any] = self.prepare_autoformer_inputs_dict(_UpperCAmelCase )
return config, inputs_dict
def __lowercase ( self : List[str] ):
_a , _a : Union[str, Any] = self.prepare_config_and_inputs()
return config, inputs_dict
def __lowercase ( self : Optional[Any] ,_UpperCAmelCase : int ,_UpperCAmelCase : int ):
_a : Any = AutoformerModel(config=_UpperCAmelCase ).to(_UpperCAmelCase ).eval()
_a : List[Any] = model(**_UpperCAmelCase )
_a : List[Any] = outputs.encoder_last_hidden_state
_a : int = outputs.last_hidden_state
with tempfile.TemporaryDirectory() as tmpdirname:
_a : Optional[int] = model.get_encoder()
encoder.save_pretrained(_UpperCAmelCase )
_a : Union[str, Any] = AutoformerEncoder.from_pretrained(_UpperCAmelCase ).to(_UpperCAmelCase )
_a , _a , _a , _a , _a : str = model.create_network_inputs(**_UpperCAmelCase )
_a , _a : str = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...] )
_a : Union[str, Any] = torch.cat(
(transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]) ,dim=-1 ,)
_a : Union[str, Any] = encoder(inputs_embeds=_UpperCAmelCase )[0]
self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1E-3 )
_a : List[str] = (
torch.mean(transformer_inputs[:, : config.context_length, ...] ,dim=1 )
.unsqueeze(1 )
.repeat(1 ,config.prediction_length ,1 )
)
_a : Union[str, Any] = torch.zeros(
[transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]] ,device=enc_input.device ,)
_a : Any = torch.cat(
(
torch.cat((seasonal_input[:, -config.label_length :, ...], zeros) ,dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
) ,dim=-1 ,)
_a : List[str] = torch.cat(
(
torch.cat((trend_input[:, -config.label_length :, ...], mean) ,dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
) ,dim=-1 ,)
with tempfile.TemporaryDirectory() as tmpdirname:
_a : Union[str, Any] = model.get_decoder()
decoder.save_pretrained(_UpperCAmelCase )
_a : int = AutoformerDecoder.from_pretrained(_UpperCAmelCase ).to(_UpperCAmelCase )
_a : Tuple = decoder(
trend=_UpperCAmelCase ,inputs_embeds=_UpperCAmelCase ,encoder_hidden_states=_UpperCAmelCase ,)[0]
self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1E-3 )
@require_torch
class AutoformerModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
lowerCAmelCase : Union[str, Any] = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
lowerCAmelCase : Any = (AutoformerForPrediction,) if is_torch_available() else ()
lowerCAmelCase : Any = {'feature-extraction': AutoformerModel} if is_torch_available() else {}
lowerCAmelCase : int = False
lowerCAmelCase : str = False
lowerCAmelCase : Optional[Any] = False
lowerCAmelCase : Optional[int] = False
lowerCAmelCase : Dict = False
lowerCAmelCase : Any = False
    def setUp(self ):
        self.model_tester = AutoformerModelTester(self )
        self.config_tester = ConfigTester(self , config_class=AutoformerConfig , has_text_modality=False )
def __lowercase ( self : Optional[int] ):
self.config_tester.run_common_tests()
def __lowercase ( self : Dict ):
_a , _a : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
_a : Dict = model_class(_UpperCAmelCase )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_UpperCAmelCase )
_a , _a : List[str] = model_class.from_pretrained(_UpperCAmelCase ,output_loading_info=_UpperCAmelCase )
self.assertEqual(info['missing_keys'] ,[] )
def __lowercase ( self : Tuple ):
_a : Dict = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_encoder_decoder_model_standalone(*_UpperCAmelCase )
@unittest.skip(reason='Model has no tokens embeddings' )
def __lowercase ( self : int ):
pass
def __lowercase ( self : Any ):
_a : Any = inspect.signature(getattr(_UpperCAmelCase ,'forward' ) )
# The main input is the name of the argument after `self`
_a : List[str] = list(model_signature.parameters.keys() )[1]
self.assertEqual(AutoformerModel.main_input_name ,_UpperCAmelCase )
def __lowercase ( self : List[str] ):
_a , _a : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a : str = model_class(_UpperCAmelCase )
_a : Tuple = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_a : Optional[Any] = [*signature.parameters.keys()]
_a : Dict = [
'past_values',
'past_time_features',
'past_observed_mask',
'static_categorical_features',
'static_real_features',
'future_values',
'future_time_features',
]
if model.__class__.__name__ in ["AutoformerForPrediction"]:
expected_arg_names.append('future_observed_mask' )
expected_arg_names.extend(
[
'decoder_attention_mask',
'head_mask',
'decoder_head_mask',
'cross_attn_head_mask',
'encoder_outputs',
'past_key_values',
'output_hidden_states',
'output_attentions',
'use_cache',
'return_dict',
] )
self.assertListEqual(arg_names[: len(_UpperCAmelCase )] ,_UpperCAmelCase )
def __lowercase ( self : str ):
_a , _a : Dict = self.model_tester.prepare_config_and_inputs_for_common()
_a : str = True
_a : Optional[Any] = getattr(self.model_tester ,'seq_length' ,_UpperCAmelCase )
_a : List[Any] = getattr(self.model_tester ,'decoder_seq_length' ,_UpperCAmelCase )
_a : Optional[int] = getattr(self.model_tester ,'encoder_seq_length' ,_UpperCAmelCase )
_a : Union[str, Any] = getattr(self.model_tester ,'d_model' ,_UpperCAmelCase )
_a : Optional[int] = getattr(self.model_tester ,'num_attention_heads' ,_UpperCAmelCase )
_a : Tuple = d_model // num_attention_heads
for model_class in self.all_model_classes:
_a : Optional[int] = True
_a : int = False
_a : str = True
_a : Any = model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
with torch.no_grad():
_a : List[str] = model(**self._prepare_for_class(_UpperCAmelCase ,_UpperCAmelCase ) )
_a : int = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(_UpperCAmelCase ) ,self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
_a : Optional[int] = True
_a : Optional[Any] = model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
with torch.no_grad():
_a : Tuple = model(**self._prepare_for_class(_UpperCAmelCase ,_UpperCAmelCase ) )
_a : int = outputs.encoder_attentions
self.assertEqual(len(_UpperCAmelCase ) ,self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) ,[self.model_tester.num_attention_heads, encoder_seq_length, dim] ,)
_a : Tuple = len(_UpperCAmelCase )
_a : Optional[Any] = 7
if "last_hidden_state" in outputs:
correct_outlen += 1
if "trend" in outputs:
correct_outlen += 1
if "past_key_values" in outputs:
correct_outlen += 1 # past_key_values have been returned
if "loss" in outputs:
correct_outlen += 1
if "params" in outputs:
correct_outlen += 1
self.assertEqual(_UpperCAmelCase ,_UpperCAmelCase )
# decoder attentions
_a : str = outputs.decoder_attentions
self.assertIsInstance(_UpperCAmelCase ,(list, tuple) )
self.assertEqual(len(_UpperCAmelCase ) ,self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) ,[self.model_tester.num_attention_heads, decoder_seq_length, dim] ,)
# cross attentions
_a : Any = outputs.cross_attentions
self.assertIsInstance(_UpperCAmelCase ,(list, tuple) )
self.assertEqual(len(_UpperCAmelCase ) ,self.model_tester.num_hidden_layers )
self.assertListEqual(
list(cross_attentions[0].shape[-3:] ) ,[self.model_tester.num_attention_heads, decoder_seq_length, dim] ,)
# Check attention is always last and order is fine
_a : str = True
_a : int = True
_a : Dict = model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
with torch.no_grad():
_a : Optional[int] = model(**self._prepare_for_class(_UpperCAmelCase ,_UpperCAmelCase ) )
self.assertEqual(out_len + 2 ,len(_UpperCAmelCase ) )
_a : Tuple = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(_UpperCAmelCase ) ,self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) ,[self.model_tester.num_attention_heads, encoder_seq_length, dim] ,)
@is_flaky()
def __lowercase ( self : str ):
super().test_retain_grad_hidden_states_attentions()
def prepare_batch(filename="train-batch.pt" ):
    file = hf_hub_download(repo_id='hf-internal-testing/tourism-monthly-batch' , filename=filename , repo_type='dataset' )
    batch = torch.load(file , map_location=torch_device )
    return batch
@require_torch
@slow
class AutoformerModelIntegrationTests( unittest.TestCase ):
def __lowercase ( self : int ):
_a : Optional[Any] = AutoformerModel.from_pretrained('huggingface/autoformer-tourism-monthly' ).to(_UpperCAmelCase )
_a : Optional[Any] = prepare_batch()
with torch.no_grad():
_a : Union[str, Any] = model(
past_values=batch['past_values'] ,past_time_features=batch['past_time_features'] ,past_observed_mask=batch['past_observed_mask'] ,static_categorical_features=batch['static_categorical_features'] ,future_values=batch['future_values'] ,future_time_features=batch['future_time_features'] ,)[0]
_a : List[str] = torch.Size(
(64, model.config.prediction_length + model.config.label_length, model.config.feature_size) )
self.assertEqual(output.shape ,_UpperCAmelCase )
_a : Any = torch.tensor(
[[0.35_93, -1.33_98, 0.63_30], [0.22_79, 1.53_96, -0.17_92], [0.04_50, 1.32_25, -0.23_35]] ,device=_UpperCAmelCase )
self.assertTrue(torch.allclose(output[0, :3, :3] ,_UpperCAmelCase ,atol=_UpperCAmelCase ) )
def __lowercase ( self : Dict ):
_a : Optional[Any] = AutoformerForPrediction.from_pretrained('huggingface/autoformer-tourism-monthly' ).to(_UpperCAmelCase )
_a : Optional[int] = prepare_batch('val-batch.pt' )
with torch.no_grad():
_a : Optional[Any] = model(
past_values=batch['past_values'] ,past_time_features=batch['past_time_features'] ,past_observed_mask=batch['past_observed_mask'] ,static_categorical_features=batch['static_categorical_features'] ,).encoder_last_hidden_state
_a : Any = torch.Size((64, model.config.context_length, model.config.d_model) )
self.assertEqual(output.shape ,_UpperCAmelCase )
_a : Union[str, Any] = torch.tensor(
[[-0.07_34, -0.90_36, 0.83_58], [4.71_86, 2.41_13, 1.95_81], [1.79_53, 2.35_58, 1.29_70]] ,device=_UpperCAmelCase )
self.assertTrue(torch.allclose(output[0, :3, :3] ,_UpperCAmelCase ,atol=_UpperCAmelCase ) )
def __lowercase ( self : Any ):
_a : Optional[Any] = AutoformerForPrediction.from_pretrained('huggingface/autoformer-tourism-monthly' ).to(_UpperCAmelCase )
_a : Tuple = prepare_batch('val-batch.pt' )
with torch.no_grad():
_a : Union[str, Any] = model.generate(
static_categorical_features=batch['static_categorical_features'] ,past_time_features=batch['past_time_features'] ,past_values=batch['past_values'] ,future_time_features=batch['future_time_features'] ,past_observed_mask=batch['past_observed_mask'] ,)
_a : List[Any] = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length) )
self.assertEqual(outputs.sequences.shape ,_UpperCAmelCase )
_a : List[str] = torch.tensor([31_30.67_63, 40_56.52_93, 70_53.07_86] ,device=_UpperCAmelCase )
_a : Union[str, Any] = outputs.sequences.mean(dim=1 )
self.assertTrue(torch.allclose(mean_prediction[0, -3:] ,_UpperCAmelCase ,rtol=1E-1 ) )
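# The integration checks above are gated by @slow; assuming the usual transformers
# test layout, they can be run with e.g.:
#
#     RUN_SLOW=1 python -m pytest tests/models/autoformer/test_modeling_autoformer.py -k Integration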
| 89 |
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {'''configuration_focalnet''': ['''FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''FocalNetConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_focalnet'''] = [
'''FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FocalNetForImageClassification''',
'''FocalNetForMaskedImageModeling''',
'''FocalNetBackbone''',
'''FocalNetModel''',
'''FocalNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 319 | 0 |
__A = {
"Pillow": "Pillow<10.0.0",
"accelerate": "accelerate>=0.20.3",
"av": "av==9.2.0",
"beautifulsoup4": "beautifulsoup4",
"black": "black~=23.1",
"codecarbon": "codecarbon==1.2.0",
"cookiecutter": "cookiecutter==1.7.3",
"dataclasses": "dataclasses",
"datasets": "datasets!=2.5.0",
"decord": "decord==0.6.0",
"deepspeed": "deepspeed>=0.9.3",
"diffusers": "diffusers",
"dill": "dill<0.3.5",
"evaluate": "evaluate>=0.2.0",
"fairscale": "fairscale>0.3",
"faiss-cpu": "faiss-cpu",
"fastapi": "fastapi",
"filelock": "filelock",
"flax": "flax>=0.4.1,<=0.7.0",
"ftfy": "ftfy",
"fugashi": "fugashi>=1.0",
"GitPython": "GitPython<3.1.19",
"hf-doc-builder": "hf-doc-builder>=0.3.0",
"huggingface-hub": "huggingface-hub>=0.14.1,<1.0",
"importlib_metadata": "importlib_metadata",
"ipadic": "ipadic>=1.0.0,<2.0",
"isort": "isort>=5.5.4",
"jax": "jax>=0.2.8,!=0.3.2,<=0.4.13",
"jaxlib": "jaxlib>=0.1.65,<=0.4.13",
"jieba": "jieba",
"kenlm": "kenlm",
"keras-nlp": "keras-nlp>=0.3.1",
"librosa": "librosa",
"nltk": "nltk",
"natten": "natten>=0.14.6",
"numpy": "numpy>=1.17",
"onnxconverter-common": "onnxconverter-common",
"onnxruntime-tools": "onnxruntime-tools>=1.4.2",
"onnxruntime": "onnxruntime>=1.4.0",
"opencv-python": "opencv-python",
"optuna": "optuna",
"optax": "optax>=0.0.8,<=0.1.4",
"packaging": "packaging>=20.0",
"parameterized": "parameterized",
"phonemizer": "phonemizer",
"protobuf": "protobuf",
"psutil": "psutil",
"pyyaml": "pyyaml>=5.1",
"pydantic": "pydantic<2",
"pytest": "pytest>=7.2.0",
"pytest-timeout": "pytest-timeout",
"pytest-xdist": "pytest-xdist",
"python": "python>=3.8.0",
"ray[tune]": "ray[tune]",
"regex": "regex!=2019.12.17",
"requests": "requests",
"rhoknp": "rhoknp>=1.1.0,<1.3.1",
"rjieba": "rjieba",
"rouge-score": "rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1",
"ruff": "ruff>=0.0.241,<=0.0.259",
"sacrebleu": "sacrebleu>=1.4.12,<2.0.0",
"sacremoses": "sacremoses",
"safetensors": "safetensors>=0.3.1",
"sagemaker": "sagemaker>=2.31.0",
"scikit-learn": "scikit-learn",
"sentencepiece": "sentencepiece>=0.1.91,!=0.1.92",
"sigopt": "sigopt",
"starlette": "starlette",
"sudachipy": "sudachipy>=0.6.6",
"sudachidict_core": "sudachidict_core>=20220729",
"tensorflow-cpu": "tensorflow-cpu>=2.6,<2.14",
"tensorflow": "tensorflow>=2.6,<2.14",
"tensorflow-text": "tensorflow-text<2.14",
"tf2onnx": "tf2onnx",
"timeout-decorator": "timeout-decorator",
"timm": "timm",
"tokenizers": "tokenizers>=0.11.1,!=0.11.3,<0.14",
"torch": "torch>=1.9,!=1.12.0",
"torchaudio": "torchaudio",
"torchvision": "torchvision",
"pyctcdecode": "pyctcdecode>=0.4.0",
"tqdm": "tqdm>=4.27",
"unidic": "unidic>=1.0.2",
"unidic_lite": "unidic_lite>=1.0.7",
"urllib3": "urllib3<2.0.0",
"uvicorn": "uvicorn",
}
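# Sketch of how a pin table like this is typically consumed (the helper below is
# hypothetical, not part of this file):
#
#     def deps_list(*pkgs):
#         return [__A[pkg] for pkg in pkgs]
#
#     deps_list("torch", "tokenizers")
#     # -> ['torch>=1.9,!=1.12.0', 'tokenizers>=0.11.1,!=0.11.3,<0.14']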
| 90 |
'''simple docstring'''
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
logger = logging.get_logger(__name__)
Prediction = Dict[str, Any]
Predictions = List[Prediction]
@add_end_docstrings(PIPELINE_INIT_ARGS )
class ObjectDetectionPipeline( Pipeline ):
'''simple docstring'''
    def __init__(self , *args , **kwargs ):
        '''simple docstring'''
        super().__init__(*args , **kwargs )
if self.framework == "tf":
raise ValueError(f"""The {self.__class__} is only available in PyTorch.""" )
requires_backends(self , '''vision''' )
self.check_model_type(
dict(MODEL_FOR_OBJECT_DETECTION_MAPPING.items() + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items() ) )
    def _sanitize_parameters(self , **kwargs ):
        '''simple docstring'''
        postprocess_kwargs = {}
        if "threshold" in kwargs:
            postprocess_kwargs['''threshold'''] = kwargs['''threshold''']
        return {}, {}, postprocess_kwargs
    def __call__(self , *args , **kwargs ) -> Union[Predictions, List[Prediction]]:
        '''simple docstring'''
        return super().__call__(*args , **kwargs )
    def preprocess(self , image ):
        '''simple docstring'''
        image = load_image(image )
        target_size = torch.IntTensor([[image.height, image.width]] )
        inputs = self.image_processor(images=[image] , return_tensors='''pt''' )
        if self.tokenizer is not None:
            inputs = self.tokenizer(text=inputs['''words'''] , boxes=inputs['''boxes'''] , return_tensors='''pt''' )
        inputs['''target_size'''] = target_size
        return inputs
    def _forward(self , model_inputs ):
        '''simple docstring'''
        target_size = model_inputs.pop('''target_size''' )
        outputs = self.model(**model_inputs )
        model_outputs = outputs.__class__({'''target_size''': target_size, **outputs} )
        if self.tokenizer is not None:
            model_outputs['''bbox'''] = model_inputs['''bbox''']
        return model_outputs
    def postprocess(self , model_outputs , threshold=0.9 ):
        '''simple docstring'''
        target_size = model_outputs['''target_size''']
        if self.tokenizer is not None:
            # This is a LayoutLMForTokenClassification variant.
            # The OCR got the boxes and the model classified the words.
            height , width = target_size[0].tolist()
            def unnormalize(bbox ):
                return self._get_bounding_box(
                    torch.Tensor(
                        [
                            (width * bbox[0] / 10_00),
                            (height * bbox[1] / 10_00),
                            (width * bbox[2] / 10_00),
                            (height * bbox[3] / 10_00),
                        ] ) )
            scores , classes = model_outputs['''logits'''].squeeze(0 ).softmax(dim=-1 ).max(dim=-1 )
            labels = [self.model.config.id2label[prediction] for prediction in classes.tolist()]
            boxes = [unnormalize(bbox ) for bbox in model_outputs['''bbox'''].squeeze(0 )]
            keys = ['''score''', '''label''', '''box''']
            annotation = [dict(zip(keys , vals ) ) for vals in zip(scores.tolist() , labels , boxes ) if vals[0] > threshold]
        else:
            # This is a regular ForObjectDetectionModel
            raw_annotations = self.image_processor.post_process_object_detection(model_outputs , threshold , target_size )
            raw_annotation = raw_annotations[0]
            scores = raw_annotation['''scores''']
            labels = raw_annotation['''labels''']
            boxes = raw_annotation['''boxes''']
            raw_annotation['''scores'''] = scores.tolist()
            raw_annotation['''labels'''] = [self.model.config.id2label[label.item()] for label in labels]
            raw_annotation['''boxes'''] = [self._get_bounding_box(box ) for box in boxes]
            # {"scores": [...], ...} --> [{"score":x, ...}, ...]
            keys = ['''score''', '''label''', '''box''']
            annotation = [
                dict(zip(keys , vals ) )
                for vals in zip(raw_annotation['''scores'''] , raw_annotation['''labels'''] , raw_annotation['''boxes'''] )
            ]
        return annotation
    def _get_bounding_box(self , box: "torch.Tensor" ) -> Dict[str, int]:
        '''simple docstring'''
        if self.framework != "pt":
            raise ValueError('''The ObjectDetectionPipeline is only available in PyTorch.''' )
        xmin , ymin , xmax , ymax = box.int().tolist()
        bbox = {
'''xmin''': xmin,
'''ymin''': ymin,
'''xmax''': xmax,
'''ymax''': ymax,
}
return bbox
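# Hedged usage sketch (the checkpoint name is illustrative; any object-detection
# checkpoint works):
#
#     from transformers import pipeline
#     detector = pipeline("object-detection", model="facebook/detr-resnet-50")
#     detector("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=0.9)
#     # -> [{"score": ..., "label": "cat", "box": {"xmin": ..., "ymin": ..., "xmax": ..., "ymax": ...}}, ...]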
| 319 | 0 |
"""simple docstring"""
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class LiltModelTester:
'''simple docstring'''
def __init__( self : Any , lowercase_ : List[Any] , lowercase_ : List[str]=13 , lowercase_ : int=7 , lowercase_ : Any=True , lowercase_ : str=True , lowercase_ : List[Any]=True , lowercase_ : List[Any]=True , lowercase_ : Dict=99 , lowercase_ : Union[str, Any]=24 , lowercase_ : int=2 , lowercase_ : List[str]=6 , lowercase_ : Any=37 , lowercase_ : Dict="gelu" , lowercase_ : List[str]=0.1 , lowercase_ : Dict=0.1 , lowercase_ : Union[str, Any]=512 , lowercase_ : List[str]=16 , lowercase_ : Any=2 , lowercase_ : Any=0.02 , lowercase_ : List[Any]=3 , lowercase_ : Optional[int]=None , lowercase_ : str=1000 , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : List[str] = parent
SCREAMING_SNAKE_CASE_ : Optional[Any] = batch_size
SCREAMING_SNAKE_CASE_ : Optional[Any] = seq_length
SCREAMING_SNAKE_CASE_ : List[Any] = is_training
SCREAMING_SNAKE_CASE_ : Optional[int] = use_input_mask
SCREAMING_SNAKE_CASE_ : Optional[Any] = use_token_type_ids
SCREAMING_SNAKE_CASE_ : int = use_labels
SCREAMING_SNAKE_CASE_ : List[Any] = vocab_size
SCREAMING_SNAKE_CASE_ : List[str] = hidden_size
SCREAMING_SNAKE_CASE_ : List[Any] = num_hidden_layers
SCREAMING_SNAKE_CASE_ : List[str] = num_attention_heads
SCREAMING_SNAKE_CASE_ : Tuple = intermediate_size
SCREAMING_SNAKE_CASE_ : Tuple = hidden_act
SCREAMING_SNAKE_CASE_ : int = hidden_dropout_prob
SCREAMING_SNAKE_CASE_ : str = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_ : List[Any] = max_position_embeddings
SCREAMING_SNAKE_CASE_ : Union[str, Any] = type_vocab_size
SCREAMING_SNAKE_CASE_ : List[str] = type_sequence_label_size
SCREAMING_SNAKE_CASE_ : Any = initializer_range
SCREAMING_SNAKE_CASE_ : Optional[Any] = num_labels
SCREAMING_SNAKE_CASE_ : Tuple = scope
SCREAMING_SNAKE_CASE_ : Optional[int] = range_bbox
def _SCREAMING_SNAKE_CASE ( self : List[str]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
SCREAMING_SNAKE_CASE_ : Dict = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox)
# Ensure that bbox is legal
for i in range(bbox.shape[0]):
for j in range(bbox.shape[1]):
if bbox[i, j, 3] < bbox[i, j, 1]:
SCREAMING_SNAKE_CASE_ : Optional[int] = bbox[i, j, 3]
SCREAMING_SNAKE_CASE_ : Optional[int] = bbox[i, j, 1]
SCREAMING_SNAKE_CASE_ : str = t
if bbox[i, j, 2] < bbox[i, j, 0]:
SCREAMING_SNAKE_CASE_ : List[str] = bbox[i, j, 2]
SCREAMING_SNAKE_CASE_ : Optional[int] = bbox[i, j, 0]
SCREAMING_SNAKE_CASE_ : List[str] = t
SCREAMING_SNAKE_CASE_ : Tuple = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE_ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2)
SCREAMING_SNAKE_CASE_ : Union[str, Any] = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
SCREAMING_SNAKE_CASE_ : List[str] = None
SCREAMING_SNAKE_CASE_ : List[str] = None
if self.use_labels:
SCREAMING_SNAKE_CASE_ : int = ids_tensor([self.batch_size] , self.type_sequence_label_size)
SCREAMING_SNAKE_CASE_ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
SCREAMING_SNAKE_CASE_ : Any = self.get_config()
return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
def _SCREAMING_SNAKE_CASE ( self : Optional[int]):
'''simple docstring'''
return LiltConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowercase_ : Optional[Any] , lowercase_ : Optional[Any] , lowercase_ : str , lowercase_ : Optional[Any] , lowercase_ : int , lowercase_ : Optional[Any] , lowercase_ : int , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Tuple = LiltModel(config=lowercase_)
model.to(lowercase_)
model.eval()
SCREAMING_SNAKE_CASE_ : List[Any] = model(lowercase_ , bbox=lowercase_ , attention_mask=lowercase_ , token_type_ids=lowercase_)
SCREAMING_SNAKE_CASE_ : List[Any] = model(lowercase_ , bbox=lowercase_ , token_type_ids=lowercase_)
SCREAMING_SNAKE_CASE_ : int = model(lowercase_ , bbox=lowercase_)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size))
def _SCREAMING_SNAKE_CASE ( self : int , lowercase_ : Union[str, Any] , lowercase_ : Union[str, Any] , lowercase_ : Union[str, Any] , lowercase_ : Optional[Any] , lowercase_ : Union[str, Any] , lowercase_ : List[Any] , lowercase_ : int , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.num_labels
SCREAMING_SNAKE_CASE_ : Optional[Any] = LiltForTokenClassification(config=lowercase_)
model.to(lowercase_)
model.eval()
SCREAMING_SNAKE_CASE_ : Tuple = model(
lowercase_ , bbox=lowercase_ , attention_mask=lowercase_ , token_type_ids=lowercase_ , labels=lowercase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def _SCREAMING_SNAKE_CASE ( self : Tuple , lowercase_ : str , lowercase_ : Union[str, Any] , lowercase_ : Any , lowercase_ : Dict , lowercase_ : List[str] , lowercase_ : List[Any] , lowercase_ : str , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Union[str, Any] = LiltForQuestionAnswering(config=lowercase_)
model.to(lowercase_)
model.eval()
SCREAMING_SNAKE_CASE_ : Optional[int] = model(
lowercase_ , bbox=lowercase_ , attention_mask=lowercase_ , token_type_ids=lowercase_ , start_positions=lowercase_ , end_positions=lowercase_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def _SCREAMING_SNAKE_CASE ( self : Tuple):
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
'''input_ids''': input_ids,
'''bbox''': bbox,
'''token_type_ids''': token_type_ids,
'''attention_mask''': input_mask,
}
return config, inputs_dict
@require_torch
class LiltModelTest( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
__UpperCamelCase = (
(
LiltModel,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltForQuestionAnswering,
)
if is_torch_available()
else ()
)
__UpperCamelCase = (
{
"feature-extraction": LiltModel,
"question-answering": LiltForQuestionAnswering,
"text-classification": LiltForSequenceClassification,
"token-classification": LiltForTokenClassification,
"zero-shot": LiltForSequenceClassification,
}
if is_torch_available()
else {}
)
__UpperCamelCase = False
__UpperCamelCase = False
def _SCREAMING_SNAKE_CASE ( self : Any , lowercase_ : List[str] , lowercase_ : Optional[int] , lowercase_ : Optional[int] , lowercase_ : str , lowercase_ : str):
'''simple docstring'''
return True
    def setUp(self ):
        '''simple docstring'''
        self.model_tester = LiltModelTester(self )
        self.config_tester = ConfigTester(self , config_class=LiltConfig , hidden_size=37 )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]):
'''simple docstring'''
self.config_tester.run_common_tests()
def _SCREAMING_SNAKE_CASE ( self : List[str]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase_)
def _SCREAMING_SNAKE_CASE ( self : Tuple):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Tuple = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
SCREAMING_SNAKE_CASE_ : Dict = type
self.model_tester.create_and_check_model(*lowercase_)
def _SCREAMING_SNAKE_CASE ( self : str):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowercase_)
def _SCREAMING_SNAKE_CASE ( self : Tuple):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*lowercase_)
@slow
def _SCREAMING_SNAKE_CASE ( self : Optional[int]):
'''simple docstring'''
for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE_ : Optional[int] = LiltModel.from_pretrained(lowercase_)
self.assertIsNotNone(lowercase_)
@require_torch
@slow
class LiltModelIntegrationTest( unittest.TestCase ):
'''simple docstring'''
def _SCREAMING_SNAKE_CASE ( self : Optional[int]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : List[str] = LiltModel.from_pretrained('''SCUT-DLVCLab/lilt-roberta-en-base''').to(lowercase_)
SCREAMING_SNAKE_CASE_ : str = torch.tensor([[1, 2]] , device=lowercase_)
SCREAMING_SNAKE_CASE_ : Optional[int] = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=lowercase_)
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE_ : Dict = model(input_ids=lowercase_ , bbox=lowercase_)
SCREAMING_SNAKE_CASE_ : str = torch.Size([1, 2, 768])
SCREAMING_SNAKE_CASE_ : Dict = torch.tensor(
[[-0.06_53, 0.09_50, -0.00_61], [-0.05_45, 0.09_26, -0.03_24]] , device=lowercase_ , )
self.assertTrue(outputs.last_hidden_state.shape , lowercase_)
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , lowercase_ , atol=1e-3))
| 91 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''YituTech/conv-bert-base''': '''https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json''',
'''YituTech/conv-bert-medium-small''': (
'''https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json'''
),
'''YituTech/conv-bert-small''': '''https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json''',
# See all ConvBERT models at https://huggingface.co/models?filter=convbert
}
class ConvBertConfig( PretrainedConfig ):
'''simple docstring'''
    model_type = """convbert"""
def __init__( self : Dict , SCREAMING_SNAKE_CASE_ : Dict=3_05_22 , SCREAMING_SNAKE_CASE_ : int=7_68 , SCREAMING_SNAKE_CASE_ : List[str]=12 , SCREAMING_SNAKE_CASE_ : List[str]=12 , SCREAMING_SNAKE_CASE_ : Dict=30_72 , SCREAMING_SNAKE_CASE_ : Optional[int]="gelu" , SCREAMING_SNAKE_CASE_ : List[Any]=0.1 , SCREAMING_SNAKE_CASE_ : int=0.1 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=5_12 , SCREAMING_SNAKE_CASE_ : List[Any]=2 , SCREAMING_SNAKE_CASE_ : List[str]=0.02 , SCREAMING_SNAKE_CASE_ : int=1E-12 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=1 , SCREAMING_SNAKE_CASE_ : int=0 , SCREAMING_SNAKE_CASE_ : str=2 , SCREAMING_SNAKE_CASE_ : List[Any]=7_68 , SCREAMING_SNAKE_CASE_ : Optional[Any]=2 , SCREAMING_SNAKE_CASE_ : Any=9 , SCREAMING_SNAKE_CASE_ : Tuple=1 , SCREAMING_SNAKE_CASE_ : List[Any]=None , **SCREAMING_SNAKE_CASE_ : List[str] , ) -> List[Any]:
'''simple docstring'''
super().__init__(
pad_token_id=SCREAMING_SNAKE_CASE_ , bos_token_id=SCREAMING_SNAKE_CASE_ , eos_token_id=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
A: Dict = vocab_size
A: Tuple = hidden_size
A: Optional[int] = num_hidden_layers
A: List[str] = num_attention_heads
A: int = intermediate_size
A: int = hidden_act
A: List[str] = hidden_dropout_prob
A: int = attention_probs_dropout_prob
A: Tuple = max_position_embeddings
A: Any = type_vocab_size
A: str = initializer_range
A: Union[str, Any] = layer_norm_eps
A: str = embedding_size
A: Optional[int] = head_ratio
A: List[Any] = conv_kernel_size
A: List[Any] = num_groups
A: Optional[int] = classifier_dropout
class ConvBertOnnxConfig( OnnxConfig ):
'''simple docstring'''
@property
def _snake_case ( self : Optional[Any] ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task == "multiple-choice":
A: Tuple = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
A: List[str] = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
('''token_type_ids''', dynamic_axis),
] )
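# Brief illustration of the dynamic-axis mapping above (a sketch; it relies on the
# class names defined in this file):
#
#     onnx_config = ConvBertOnnxConfig(ConvBertConfig())
#     print(onnx_config.inputs)
#     # OrderedDict([('input_ids', {0: 'batch', 1: 'sequence'}), ...])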
| 319 | 0 |
def infix_2_postfix(infix ):
    stack = []
    post_fix = []
    priority = {
        "^": 3,
        "*": 2,
        "/": 2,
        "%": 2,
        "+": 1,
        "-": 1,
    }  # Priority of each operator
    print_width = len(infix ) if (len(infix ) > 7) else 7
    # Print table header for output
    print(
        "Symbol".center(8 ) , "Stack".center(print_width ) , "Postfix".center(print_width ) , sep=" | " , )
    print("-" * (print_width * 3 + 7) )
    for x in infix:
        if x.isalpha() or x.isdigit():
            post_fix.append(x )  # if x is Alphabet / Digit, add it to Postfix
        elif x == "(":
            stack.append(x )  # if x is "(" push to Stack
        elif x == ")":  # if x is ")" pop stack until "(" is encountered
            while stack[-1] != "(":
                post_fix.append(stack.pop() )  # Pop stack & add the content to Postfix
            stack.pop()
        else:
            if len(stack ) == 0:
                stack.append(x )  # If stack is empty, push x to stack
            else:  # while priority of x is not > priority of element in the stack
                # (the extra "(" check keeps operators inside parentheses from popping it)
                while len(stack ) > 0 and stack[-1] != "(" and priority[x] <= priority[stack[-1]]:
                    post_fix.append(stack.pop() )  # pop stack & add to Postfix
                stack.append(x )  # push x to stack
        print(
            x.center(8 ) , ("".join(stack )).ljust(print_width ) , ("".join(post_fix )).ljust(print_width ) , sep=" | " , )  # Output in tabular format
    while len(stack ) > 0:  # while stack is not empty
        post_fix.append(stack.pop() )  # pop stack & add to Postfix
        print(
            " ".center(8 ) , ("".join(stack )).ljust(print_width ) , ("".join(post_fix )).ljust(print_width ) , sep=" | " , )  # Output in tabular format
    return "".join(post_fix )  # return Postfix as str
def infix_2_prefix(infix ):
    infix = list(infix[::-1] )  # reverse the infix equation
    for i in range(len(infix ) ):
        if infix[i] == "(":
            infix[i] = ")"  # change "(" to ")"
        elif infix[i] == ")":
            infix[i] = "("  # change ")" to "("
    return (infix_2_postfix("".join(infix ) ))[
        ::-1
    ]  # call infix_2_postfix on Infix, return reverse of Postfix
if __name__ == "__main__":
    Infix = input("\nEnter an Infix Equation = " )  # Input an Infix equation
    Infix = "".join(Infix.split() )  # Remove spaces from the input
    print("\n\t" , Infix , "(Infix) -> " , infix_2_prefix(Infix ) , "(Prefix)" )
| 92 |
'''simple docstring'''
from __future__ import annotations
def check_polygon(nums ) -> bool:
if len(__lowercase ) < 2:
raise ValueError('''Monogons and Digons are not polygons in the Euclidean space''' )
if any(i <= 0 for i in nums ):
raise ValueError('''All values must be greater than 0''' )
A: Any = nums.copy()
copy_nums.sort()
return copy_nums[-1] < sum(copy_nums[:-1] )
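# e.g. check_polygon([6, 10, 15]) -> True (15 < 6 + 10), while
# check_polygon([3, 7, 13, 2]) -> False (13 is not < 3 + 7 + 2).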
if __name__ == "__main__":
import doctest
doctest.testmod()
| 319 | 0 |
'''simple docstring'''
import unittest
from transformers import load_tool
from transformers.utils import is_torch_available
if is_torch_available():
import torch
from transformers.testing_utils import require_torch
from .test_tools_common import ToolTesterMixin
@require_torch
class TextToSpeechToolTester( unittest.TestCase , ToolTesterMixin ):
    def setUp(self ):
        """simple docstring"""
        self.tool = load_tool('''text-to-speech''' )
        self.tool.setup()
    def test_exact_match_arg(self ):
        """simple docstring"""
        torch.manual_seed(0 )
        result = self.tool('''hey''' )
        resulting_tensor = result.to_raw()
self.assertTrue(
torch.allclose(
resulting_tensor[:3] , torch.tensor([-0.0_005_966_668_832_115_829, -0.0_003_657_640_190_795_064, -0.00_013_439_502_799_883_485] ) , ) )
    def test_exact_match_kwarg(self ):
        """simple docstring"""
        torch.manual_seed(0 )
        result = self.tool(text='''hey''' )
        resulting_tensor = result.to_raw()
self.assertTrue(
torch.allclose(
resulting_tensor[:3] , torch.tensor([-0.0_005_966_668_832_115_829, -0.0_003_657_640_190_795_064, -0.00_013_439_502_799_883_485] ) , ) )
| 93 |
'''simple docstring'''
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
UpperCamelCase = abspath(join(dirname(__file__), '''src'''))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='''ignore''', category=FutureWarning)
def pytest_configure(config ):
config.addinivalue_line(
'''markers''' , '''is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested''' )
config.addinivalue_line(
'''markers''' , '''is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested''' )
config.addinivalue_line('''markers''' , '''is_pipeline_test: mark test to run only when pipelines are tested''' )
config.addinivalue_line('''markers''' , '''is_staging_test: mark test to run only in the staging environment''' )
config.addinivalue_line('''markers''' , '''accelerate_tests: mark test that require accelerate''' )
config.addinivalue_line('''markers''' , '''tool_tests: mark the tool tests that are run on their specific schedule''' )
def pytest_addoption(parser):
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)
def pytest_terminal_summary(terminalreporter):
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
def pytest_sessionfinish(session, exitstatus):
    # If no tests are collected, pytest exits with code 5, which makes the CI fail.
    if exitstatus == 5:
        session.exitstatus = 0
# Doctest custom flag to ignore output.
IGNORE_RESULT = doctest.register_optionflag("IGNORE_RESULT")

OutputChecker = doctest.OutputChecker


class CustomOutputChecker(OutputChecker):
    def check_output(self, want, got, optionflags):
        if IGNORE_RESULT & optionflags:
            return True
        return OutputChecker.check_output(self, want, got, optionflags)


doctest.OutputChecker = CustomOutputChecker
_pytest.doctest.DoctestModule = HfDoctestModule
doctest.DocTestParser = HfDocTestParser
| 319 | 0 |
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
filename = get_tests_dir() + "/test_data/fsmt/fsmt_val_data.json"
with io.open(filename, "r", encoding="utf-8") as f:
    bleu_data = json.load(f)
@require_torch
class ModelEvalTester(unittest.TestCase):
    def get_tokenizer(self, mname):
        return FSMTTokenizer.from_pretrained(mname)

    def get_model(self, mname):
        model = FSMTForConditionalGeneration.from_pretrained(mname).to(torch_device)
        if torch_device == "cuda":
            model.half()
        return model

    @parameterized.expand(
        [
            ["en-ru", 26.0],
            ["ru-en", 22.0],
            ["en-de", 22.0],
            ["de-en", 29.0],
        ]
    )
    @slow
    def test_bleu_scores(self, pair, min_bleu_score):
        # note: this test is not testing the best performance since it only evals a small batch
        # but it should be enough to detect a regression in the output quality
        mname = f"facebook/wmt19-{pair}"
        tokenizer = self.get_tokenizer(mname)
        model = self.get_model(mname)

        src_sentences = bleu_data[pair]["src"]
        tgt_sentences = bleu_data[pair]["tgt"]

        batch = tokenizer(src_sentences, return_tensors="pt", truncation=True, padding="longest").to(torch_device)
        outputs = model.generate(
            input_ids=batch.input_ids,
            num_beams=8,
        )
        decoded_sentences = tokenizer.batch_decode(
            outputs, skip_special_tokens=True, clean_up_tokenization_spaces=False
        )
        scores = calculate_bleu(decoded_sentences, tgt_sentences)
        print(scores)
        self.assertGreaterEqual(scores["bleu"], min_bleu_score)
| 94 |
'''simple docstring'''
import heapq
import sys
import numpy as np
TPos = tuple[int, int]


class PriorityQueue:
    def __init__(self):
        self.elements = []
        self.set = set()

    def minkey(self):
        if not self.empty():
            return self.elements[0][0]
        else:
            return float("inf")

    def empty(self):
        return len(self.elements) == 0

    def put(self, item, priority):
        if item not in self.set:
            heapq.heappush(self.elements, (priority, item))
            self.set.add(item)
        else:
            # update the priority of an already-present item
            # print("update", item)
            temp = []
            (pri, x) = heapq.heappop(self.elements)
            while x != item:
                temp.append((pri, x))
                (pri, x) = heapq.heappop(self.elements)
            temp.append((priority, item))
            for pro, xxx in temp:
                heapq.heappush(self.elements, (pro, xxx))

    def remove_element(self, item):
        if item in self.set:
            self.set.remove(item)
            temp = []
            (pro, x) = heapq.heappop(self.elements)
            while x != item:
                temp.append((pro, x))
                (pro, x) = heapq.heappop(self.elements)
            for prito, yyy in temp:
                heapq.heappush(self.elements, (prito, yyy))

    def top_show(self):
        return self.elements[0][1]

    def get(self):
        (priority, item) = heapq.heappop(self.elements)
        self.set.remove(item)
        return (priority, item)
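# Usage sketch (illustrative): `put` inserts or re-prioritises, `minkey` peeks at
# the smallest priority, and `get` pops the (priority, item) pair:
#   pq = PriorityQueue()
#   pq.put((0, 0), 5)
#   pq.put((1, 1), 2)
#   pq.minkey()        # -> 2
#   pq.put((0, 0), 1)  # re-inserting an item updates its priority
#   pq.get()           # -> (1, (0, 0))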
def consistent_heuristic(p: TPos, goal: TPos):
    # euclidean distance
    a = np.array(p)
    b = np.array(goal)
    return np.linalg.norm(a - b)


def heuristic_1(p: TPos, goal: TPos):
    # integer division by time variable
    return consistent_heuristic(p, goal) // t


def heuristic_2(p: TPos, goal: TPos):
    # manhattan distance
    return abs(p[0] - goal[0]) + abs(p[1] - goal[1])


def key(start: TPos, i: int, goal: TPos, g_function: dict[TPos, float]):
    ans = g_function[start] + W1 * heuristics[i](start, goal)
    return ans
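# Worked example: with W1 = 1 and heuristic_2 (manhattan distance), a node
# s = (0, 0) with g_function[s] = 0 and goal = (19, 19) gets
# key(s, 2, goal, g_function) = 0 + 1 * 38 = 38.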
def do_something(back_pointer, goal, start):
    grid = np.chararray((n, n))
    for i in range(n):
        for j in range(n):
            grid[i][j] = "*"

    for i in range(n):
        for j in range(n):
            if (j, (n - 1) - i) in blocks:
                grid[i][j] = "#"

    grid[0][(n - 1)] = "-"
    x = back_pointer[goal]
    while x != start:
        (x_c, y_c) = x
        # print(x)
        grid[(n - 1) - y_c][x_c] = "-"
        x = back_pointer[x]
    grid[(n - 1)][0] = "-"

    for i in range(n):
        for j in range(n):
            if (i, j) == (0, n - 1):
                print(grid[i][j], end=" ")
                print("<-- End position", end=" ")
            else:
                print(grid[i][j], end=" ")
        print()
    print("^")
    print("Start position")
    print()
    print("# is an obstacle")
    print("- is the path taken by algorithm")
    print("PATH TAKEN BY THE ALGORITHM IS:-")
    x = back_pointer[goal]
    while x != start:
        print(x, end=" ")
        x = back_pointer[x]
    print(x)
    sys.exit()
def valid(p: TPos):
    if p[0] < 0 or p[0] > n - 1:
        return False
    if p[1] < 0 or p[1] > n - 1:
        return False
    return True
def expand_state(
    s,
    j,
    visited,
    g_function,
    close_list_anchor,
    close_list_inad,
    open_list,
    back_pointer,
):
    for itera in range(n_heuristic):
        open_list[itera].remove_element(s)
    # print("s", s)
    # print("j", j)
    (x, y) = s
    left = (x - 1, y)
    right = (x + 1, y)
    up = (x, y + 1)
    down = (x, y - 1)

    for neighbours in [left, right, up, down]:
        if neighbours not in blocks:
            if valid(neighbours) and neighbours not in visited:
                # print("neighbour", neighbours)
                visited.add(neighbours)
                back_pointer[neighbours] = -1
                g_function[neighbours] = float("inf")

                if valid(neighbours) and g_function[neighbours] > g_function[s] + 1:
                    g_function[neighbours] = g_function[s] + 1
                    back_pointer[neighbours] = s
                    if neighbours not in close_list_anchor:
                        open_list[0].put(neighbours, key(neighbours, 0, goal, g_function))
                        if neighbours not in close_list_inad:
                            for var in range(1, n_heuristic):
                                if key(neighbours, var, goal, g_function) <= W2 * key(
                                    neighbours, 0, goal, g_function
                                ):
                                    open_list[j].put(
                                        neighbours, key(neighbours, var, goal, g_function)
                                    )
def make_common_ground() -> list[TPos]:
    some_list = []
    for x in range(1, 5):
        for y in range(1, 6):
            some_list.append((x, y))

    for x in range(15, 20):
        some_list.append((x, 17))

    for x in range(10, 19):
        for y in range(1, 15):
            some_list.append((x, y))

    # L block
    for x in range(1, 4):
        for y in range(12, 19):
            some_list.append((x, y))
    for x in range(3, 13):
        for y in range(16, 19):
            some_list.append((x, y))
    return some_list
heuristics = {0: consistent_heuristic, 1: heuristic_1, 2: heuristic_2}

blocks_blk = [
(0, 1),
(1, 1),
(2, 1),
(3, 1),
(4, 1),
(5, 1),
(6, 1),
(7, 1),
(8, 1),
(9, 1),
(10, 1),
(11, 1),
(12, 1),
(13, 1),
(14, 1),
(15, 1),
(16, 1),
(17, 1),
(18, 1),
(19, 1),
]
blocks_all = make_common_ground()

blocks = blocks_blk
# hyper parameters
W1 = 1
W2 = 1
n = 20
n_heuristic = 3  # one consistent and two other inconsistent

# start and end destination
start = (0, 0)
goal = (n - 1, n - 1)

t = 1
def multi_a_star(start: TPos, goal: TPos, n_heuristic: int):
    g_function = {start: 0, goal: float("inf")}
    back_pointer = {start: -1, goal: -1}
    open_list = []
    visited = set()

    for i in range(n_heuristic):
        open_list.append(PriorityQueue())
        open_list[i].put(start, key(start, i, goal, g_function))

    close_list_anchor: list[int] = []
    close_list_inad: list[int] = []
    while open_list[0].minkey() < float("inf"):
        for i in range(1, n_heuristic):
            # print(open_list[0].minkey(), open_list[i].minkey())
            if open_list[i].minkey() <= W2 * open_list[0].minkey():
                global t
                t += 1
                if g_function[goal] <= open_list[i].minkey():
                    if g_function[goal] < float("inf"):
                        do_something(back_pointer, goal, start)
                else:
                    get_s = open_list[i].top_show()
                    visited.add(get_s)
                    expand_state(
                        get_s,
                        i,
                        visited,
                        g_function,
                        close_list_anchor,
                        close_list_inad,
                        open_list,
                        back_pointer,
                    )
                    close_list_inad.append(get_s)
            else:
                if g_function[goal] <= open_list[0].minkey():
                    if g_function[goal] < float("inf"):
                        do_something(back_pointer, goal, start)
                    else:
                        get_s = open_list[0].top_show()
                        visited.add(get_s)
                        expand_state(
                            get_s,
                            0,
                            visited,
                            g_function,
                            close_list_anchor,
                            close_list_inad,
                            open_list,
                            back_pointer,
                        )
                        close_list_anchor.append(get_s)
    print("No path found to goal")
    print()
for i in range(n - 1 , -1 , -1 ):
        for j in range(n):
if (j, i) in blocks:
print('''#''' , end=''' ''' )
elif (j, i) in back_pointer:
if (j, i) == (n - 1, n - 1):
print('''*''' , end=''' ''' )
else:
print('''-''' , end=''' ''' )
else:
print('''*''' , end=''' ''' )
if (j, i) == (n - 1, n - 1):
print('''<-- End position''' , end=''' ''' )
print()
print('''^''' )
print('''Start position''' )
print()
print('''# is an obstacle''' )
print('''- is the path taken by algorithm''' )
if __name__ == "__main__":
multi_a_star(start, goal, n_heuristic)
| 319 | 0 |
from queue import Queue
from typing import TYPE_CHECKING, Optional
if TYPE_CHECKING:
from ..models.auto import AutoTokenizer
class BaseStreamer:
    """Base class from which `.generate()` streamers should inherit."""

    def put(self, value):
        """Function that is called by `.generate()` to push new tokens"""
        raise NotImplementedError()

    def end(self):
        """Function that is called by `.generate()` to signal the end of generation"""
        raise NotImplementedError()


class TextStreamer(BaseStreamer):
    def __init__(self, tokenizer: "AutoTokenizer", skip_prompt: bool = False, **decode_kwargs):
        self.tokenizer = tokenizer
        self.skip_prompt = skip_prompt
        self.decode_kwargs = decode_kwargs

        # variables used in the streaming process
        self.token_cache = []
        self.print_len = 0
        self.next_tokens_are_prompt = True

    def put(self, value):
        """Receives tokens, decodes them, and prints them to stdout as soon as they form entire words."""
        if len(value.shape) > 1 and value.shape[0] > 1:
            raise ValueError("TextStreamer only supports batch size 1")
        elif len(value.shape) > 1:
            value = value[0]

        if self.skip_prompt and self.next_tokens_are_prompt:
            self.next_tokens_are_prompt = False
            return

        # Add the new token to the cache and decodes the entire thing.
        self.token_cache.extend(value.tolist())
        text = self.tokenizer.decode(self.token_cache, **self.decode_kwargs)

        # After the symbol for a new line, we flush the cache.
        if text.endswith("\n"):
            printable_text = text[self.print_len :]
            self.token_cache = []
            self.print_len = 0
        # If the last token is a CJK character, we print the characters.
        elif len(text) > 0 and self._is_chinese_char(ord(text[-1])):
            printable_text = text[self.print_len :]
            self.print_len += len(printable_text)
        # Otherwise, prints until the last space char (simple heuristic to avoid printing incomplete words,
        # which may change with the subsequent token -- there are probably smarter ways to do this!)
        else:
            printable_text = text[self.print_len : text.rfind(" ") + 1]
            self.print_len += len(printable_text)

        self.on_finalized_text(printable_text)

    def end(self):
        """Flushes any remaining cache."""
        if len(self.token_cache) > 0:
            text = self.tokenizer.decode(self.token_cache, **self.decode_kwargs)
            printable_text = text[self.print_len :]
            self.token_cache = []
            self.print_len = 0
        else:
            printable_text = ""

        self.next_tokens_are_prompt = True
        self.on_finalized_text(printable_text, stream_end=True)

    def on_finalized_text(self, text: str, stream_end: bool = False):
        """Prints the new text to stdout. If the stream is ending, also prints a newline."""
        print(text, flush=True, end="" if not stream_end else None)

    def _is_chinese_char(self, cp):
        """Checks whether CP is the codepoint of a CJK character."""
        if (
            (cp >= 0x4E00 and cp <= 0x9FFF)
            or (cp >= 0x3400 and cp <= 0x4DBF)  #
            or (cp >= 0x20000 and cp <= 0x2A6DF)  #
            or (cp >= 0x2A700 and cp <= 0x2B73F)  #
            or (cp >= 0x2B740 and cp <= 0x2B81F)  #
            or (cp >= 0x2B820 and cp <= 0x2CEAF)  #
            or (cp >= 0xF900 and cp <= 0xFAFF)
            or (cp >= 0x2F800 and cp <= 0x2FA1F)  #
        ):  #
            return True
        return False


class TextIteratorStreamer(TextStreamer):
    def __init__(
        self, tokenizer: "AutoTokenizer", skip_prompt: bool = False, timeout: Optional[float] = None, **decode_kwargs
    ):
        super().__init__(tokenizer, skip_prompt, **decode_kwargs)
        self.text_queue = Queue()
        self.stop_signal = None
        self.timeout = timeout

    def on_finalized_text(self, text: str, stream_end: bool = False):
        """Put the new text in the queue. If the stream is ending, also put a stop signal in the queue."""
        self.text_queue.put(text, timeout=self.timeout)
        if stream_end:
            self.text_queue.put(self.stop_signal, timeout=self.timeout)

    def __iter__(self):
        return self

    def __next__(self):
        value = self.text_queue.get(timeout=self.timeout)
        if value == self.stop_signal:
            raise StopIteration()
        else:
            return value
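# Usage sketch (illustrative; `model`, `tokenizer` and `inputs` are placeholders):
#   from threading import Thread
#   streamer = TextIteratorStreamer(tokenizer)
#   thread = Thread(target=model.generate, kwargs={**inputs, "streamer": streamer})
#   thread.start()
#   for new_text in streamer:  # yields decoded text as generation progresses
#       print(new_text, end="")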
| 95 |
'''simple docstring'''
def solution(numerator: int = 1, digit: int = 1000) -> int:
    the_digit = 1
    longest_list_length = 0

    for divide_by_number in range(numerator, digit + 1):
        has_been_divided: list[int] = []
        now_divide = numerator
        for _ in range(1, digit + 1):
            if now_divide in has_been_divided:
                if longest_list_length < len(has_been_divided):
                    longest_list_length = len(has_been_divided)
                    the_digit = divide_by_number
            else:
                has_been_divided.append(now_divide)
                now_divide = now_divide * 10 % divide_by_number

    return the_digit
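
# Worked example: among denominators below 10, 1/7 has the longest recurring
# decimal cycle (0.(142857), length 6), so solution(1, 10) returns 7.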
# Tests
if __name__ == "__main__":
import doctest
doctest.testmod()
| 319 | 0 |
"""simple docstring"""
def equated_monthly_installments(
    principal: float, rate_per_annum: float, years_to_repay: int
) -> float:
    if principal <= 0:
        raise Exception("Principal borrowed must be > 0")
    if rate_per_annum < 0:
        raise Exception("Rate of interest must be >= 0")
    if years_to_repay <= 0 or not isinstance(years_to_repay, int):
        raise Exception("Years to repay must be an integer > 0")

    # Yearly rate is divided by 12 to get monthly rate
    rate_per_month = rate_per_annum / 12

    # Years to repay is multiplied by 12 to get number of payments as payment is monthly
    number_of_payments = years_to_repay * 12

    return (
        principal
        * rate_per_month
        * (1 + rate_per_month) ** number_of_payments
        / ((1 + rate_per_month) ** number_of_payments - 1)
    )
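
# Worked example: borrowing 25_000 at 12% per annum over 3 years gives a monthly
# rate of 0.01 and 36 payments, so the EMI is
# 25_000 * 0.01 * 1.01**36 / (1.01**36 - 1) ~= 830.36.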
if __name__ == "__main__":
import doctest
doctest.testmod()
| 96 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
    "configuration_vision_encoder_decoder": ["VisionEncoderDecoderConfig", "VisionEncoderDecoderOnnxConfig"]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vision_encoder_decoder"] = ["VisionEncoderDecoderModel"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_vision_encoder_decoder"] = ["TFVisionEncoderDecoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_vision_encoder_decoder"] = ["FlaxVisionEncoderDecoderModel"]
if TYPE_CHECKING:
from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 319 | 0 |
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from seq2seq_trainer import Seq2SeqTrainer
from seq2seq_training_args import Seq2SeqTrainingArguments
import transformers
from transformers import (
AutoConfig,
    AutoModelForSeq2SeqLM,
AutoTokenizer,
HfArgumentParser,
MBartTokenizer,
MBartTokenizerFast,
set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
    Seq2SeqDataCollator,
    Seq2SeqDataset,
assert_all_frozen,
build_compute_metrics_fn,
check_output_dir,
freeze_embeds,
freeze_params,
lmap,
save_json,
use_task_specific_params,
write_txt_file,
)
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    freeze_encoder: bool = field(default=False, metadata={"help": "Whether to freeze the encoder."})
    freeze_embeds: bool = field(default=False, metadata={"help": "Whether to freeze the embeddings."})
@dataclass
class DataTrainingArguments:
    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."}
    )
    task: Optional[str] = field(
        default="summarization",
        metadata={"help": "Task name, summarization (or summarization_{dataset} for pegasus) or translation"},
    )
    max_source_length: Optional[int] = field(
        default=1024,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    max_target_length: Optional[int] = field(
        default=128,
        metadata={
            "help": (
                "The maximum total sequence length for target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    val_max_target_length: Optional[int] = field(
        default=142,
        metadata={
            "help": (
                "The maximum total sequence length for validation target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded. "
                "This argument is also used to override the ``max_length`` param of ``model.generate``, which is used "
                "during ``evaluate`` and ``predict``."
            )
        },
    )
    test_max_target_length: Optional[int] = field(
        default=142,
        metadata={
            "help": (
                "The maximum total sequence length for test target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    n_train: Optional[int] = field(default=-1, metadata={"help": "# training examples. -1 means use all."})
    n_val: Optional[int] = field(default=-1, metadata={"help": "# validation examples. -1 means use all."})
    n_test: Optional[int] = field(default=-1, metadata={"help": "# test examples. -1 means use all."})
    src_lang: Optional[str] = field(default=None, metadata={"help": "Source language id for translation."})
    tgt_lang: Optional[str] = field(default=None, metadata={"help": "Target language id for translation."})
    eval_beams: Optional[int] = field(default=None, metadata={"help": "# num_beams to use for evaluation."})
    ignore_pad_token_for_loss: bool = field(
        default=True,
        metadata={"help": "If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined."},
    )
def handle_metrics(split, metrics, output_dir):
    logger.info(f"***** {split} metrics *****")
    for key in sorted(metrics.keys()):
        logger.info(f"  {key} = {metrics[key]}")
    save_json(metrics, os.path.join(output_dir, f"{split}_results.json"))
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, Seq2SeqTrainingArguments))

    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    check_output_dir(training_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED),
        training_args.fp16,
    )
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed
    set_seed(training_args.seed)

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )

    extra_model_params = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout")
    for p in extra_model_params:
        if getattr(training_args, p, None):
            assert hasattr(config, p), f"({config.__class__.__name__}) doesn't have a `{p}` attribute"
            setattr(config, p, getattr(training_args, p))

    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )
    model = AutoModelForSeq2SeqLM.from_pretrained(
        model_args.model_name_or_path,
        from_tf=".ckpt" in model_args.model_name_or_path,
        config=config,
        cache_dir=model_args.cache_dir,
    )

    # use task specific params
    use_task_specific_params(model, data_args.task)

    # set num_beams for evaluation
    if data_args.eval_beams is None:
        data_args.eval_beams = model.config.num_beams

    # set decoder_start_token_id for MBart
    if model.config.decoder_start_token_id is None and isinstance(tokenizer, (MBartTokenizer, MBartTokenizerFast)):
        assert (
            data_args.tgt_lang is not None and data_args.src_lang is not None
        ), "mBart requires --tgt_lang and --src_lang"
        if isinstance(tokenizer, MBartTokenizer):
            model.config.decoder_start_token_id = tokenizer.lang_code_to_id[data_args.tgt_lang]
        else:
            model.config.decoder_start_token_id = tokenizer.convert_tokens_to_ids(data_args.tgt_lang)

    if model_args.freeze_embeds:
        freeze_embeds(model)
    if model_args.freeze_encoder:
        freeze_params(model.get_encoder())
        assert_all_frozen(model.get_encoder())

    dataset_class = Seq2SeqDataset

    # Get datasets
    train_dataset = (
        dataset_class(
            tokenizer,
            type_path="train",
            data_dir=data_args.data_dir,
            n_obs=data_args.n_train,
            max_target_length=data_args.max_target_length,
            max_source_length=data_args.max_source_length,
            prefix=model.config.prefix or "",
        )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        dataset_class(
            tokenizer,
            type_path="val",
            data_dir=data_args.data_dir,
            n_obs=data_args.n_val,
            max_target_length=data_args.val_max_target_length,
            max_source_length=data_args.max_source_length,
            prefix=model.config.prefix or "",
        )
        if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
        else None
    )
    test_dataset = (
        dataset_class(
            tokenizer,
            type_path="test",
            data_dir=data_args.data_dir,
            n_obs=data_args.n_test,
            max_target_length=data_args.test_max_target_length,
            max_source_length=data_args.max_source_length,
            prefix=model.config.prefix or "",
        )
        if training_args.do_predict
        else None
    )

    # Initialize our Trainer
    compute_metrics_fn = (
        build_compute_metrics_fn(data_args.task, tokenizer) if training_args.predict_with_generate else None
    )
    trainer = Seq2SeqTrainer(
        model=model,
        args=training_args,
        data_args=data_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        data_collator=Seq2SeqDataCollator(
            tokenizer, data_args, model.config.decoder_start_token_id, training_args.tpu_num_cores
        ),
        compute_metrics=compute_metrics_fn,
        tokenizer=tokenizer,
    )

    all_metrics = {}
    # Training
    if training_args.do_train:
        logger.info("*** Train ***")

        train_result = trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None
        )
        metrics = train_result.metrics
        metrics["train_n_objs"] = data_args.n_train

        trainer.save_model()  # this also saves the tokenizer

        if trainer.is_world_process_zero():
            handle_metrics("train", metrics, training_args.output_dir)
            all_metrics.update(metrics)

            # Need to save the state, since Trainer.save_model saves only the tokenizer with the model
            trainer.state.save_to_json(os.path.join(training_args.output_dir, "trainer_state.json"))

            # For convenience, we also re-save the tokenizer to the same directory,
            # so that you can share your model easily on huggingface.co/models =)
            tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        metrics = trainer.evaluate(metric_key_prefix="val")
        metrics["val_n_objs"] = data_args.n_val
        metrics["val_loss"] = round(metrics["val_loss"], 4)

        if trainer.is_world_process_zero():
            handle_metrics("val", metrics, training_args.output_dir)
            all_metrics.update(metrics)

    if training_args.do_predict:
        logger.info("*** Predict ***")

        test_output = trainer.predict(test_dataset=test_dataset, metric_key_prefix="test")
        metrics = test_output.metrics
        metrics["test_n_objs"] = data_args.n_test

        if trainer.is_world_process_zero():
            metrics["test_loss"] = round(metrics["test_loss"], 4)
            handle_metrics("test", metrics, training_args.output_dir)
            all_metrics.update(metrics)

            if training_args.predict_with_generate:
                test_preds = tokenizer.batch_decode(
                    test_output.predictions, skip_special_tokens=True, clean_up_tokenization_spaces=True
                )
                test_preds = lmap(str.strip, test_preds)
                write_txt_file(test_preds, os.path.join(training_args.output_dir, "test_generations.txt"))

    if trainer.is_world_process_zero():
        save_json(all_metrics, os.path.join(training_args.output_dir, "all_results.json"))

    return all_metrics


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
| 97 |
'''simple docstring'''
import fire
from utils import calculate_rouge, save_json
def calculate_rouge_path(pred_path, tgt_path, save_path=None, **kwargs):
    """Kwargs will be passed to calculate_rouge"""
    pred_lns = [x.strip() for x in open(pred_path).readlines()]
    tgt_lns = [x.strip() for x in open(tgt_path).readlines()][: len(pred_lns)]
    metrics = calculate_rouge(pred_lns, tgt_lns, **kwargs)
    if save_path is not None:
        save_json(metrics, save_path, indent=None)
    return metrics  # these print nicely
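
# Example invocation via fire (file names are placeholders):
#   python rouge_cli.py pred_summaries.txt test_summaries.txt --save_path metrics.json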
if __name__ == "__main__":
fire.Fire(calculate_rouge_path)
| 319 | 0 |
"""simple docstring"""
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
N_POPULATION = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
N_SELECTED = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
MUTATION_PROBABILITY = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1_000))
def evaluate(item: str, main_target: str) -> tuple[str, float]:
    score = len([g for position, g in enumerate(item) if g == main_target[position]])
    return (item, float(score))
def crossover(parent_1: str, parent_2: str) -> tuple[str, str]:
    random_slice = random.randint(0, len(parent_1) - 1)
    child_1 = parent_1[:random_slice] + parent_2[random_slice:]
    child_2 = parent_2[:random_slice] + parent_1[random_slice:]
    return (child_1, child_2)
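
# Worked example: crossing "banana" and "cherry" at random_slice = 3 yields the
# children "banrry" and "cheana".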
def mutate(child: str, genes: list[str]) -> str:
    child_list = list(child)
    if random.uniform(0, 1) < MUTATION_PROBABILITY:
        child_list[random.randint(0, len(child)) - 1] = random.choice(genes)
    return "".join(child_list)
def select(
    parent_1: tuple[str, float],
    population_score: list[tuple[str, float]],
    genes: list[str],
) -> list[str]:
    pop = []
    # Generate more children proportionally to the fitness score.
    child_n = int(parent_1[1] * 100) + 1
    child_n = 10 if child_n >= 10 else child_n
    for _ in range(child_n):
        parent_2 = population_score[random.randint(0, N_SELECTED)][0]
        child_1, child_2 = crossover(parent_1[0], parent_2)
        # Append new string to the population list.
        pop.append(mutate(child_1, genes))
        pop.append(mutate(child_2, genes))
    return pop
def basic(target: str, genes: list[str], debug: bool = True) -> tuple[int, int, str]:
    # Verify if N_POPULATION is bigger than N_SELECTED
    if N_POPULATION < N_SELECTED:
        raise ValueError(f"{N_POPULATION} must be bigger than {N_SELECTED}")
    # Verify that the target contains no genes besides the ones inside genes variable.
    not_in_genes_list = sorted({c for c in target if c not in genes})
    if not_in_genes_list:
        raise ValueError(f"{not_in_genes_list} is not in genes list, evolution cannot converge")

    # Generate random starting population.
    population = []
    for _ in range(N_POPULATION):
        population.append("".join([random.choice(genes) for i in range(len(target))]))

    # Just some logs to know what the algorithms is doing.
    generation, total_population = 0, 0

    # This loop will end when we find a perfect match for our target.
    while True:
        generation += 1
        total_population += len(population)

        # Random population created. Now it's time to evaluate.
        # Adding a bit of concurrency can make everything faster,
        #
        # import concurrent.futures
        # population_score: list[tuple[str, float]] = []
        # with concurrent.futures.ThreadPoolExecutor(
        #         max_workers=NUM_WORKERS) as executor:
        #     futures = {executor.submit(evaluate, item) for item in population}
        #     concurrent.futures.wait(futures)
        #     population_score = [item.result() for item in futures]
        #
        # but with a simple algorithm like this, it will probably be slower.
        # We just need to call evaluate for every item inside the population.
        population_score = [evaluate(item, target) for item in population]

        # Check if there is a matching evolution.
        population_score = sorted(population_score, key=lambda x: x[1], reverse=True)
        if population_score[0][0] == target:
            return (generation, total_population, population_score[0][0])

        # Print the best result every 10 generation.
        # Just to know that the algorithm is working.
        if debug and generation % 10 == 0:
            print(
                f"\nGeneration: {generation}"
                f"\nTotal Population:{total_population}"
                f"\nBest score: {population_score[0][1]}"
                f"\nBest string: {population_score[0][0]}"
            )

        # Flush the old population, keeping some of the best evolutions.
        # Keeping this avoid regression of evolution.
        population_best = population[: int(N_POPULATION / 3)]
        population.clear()
        population.extend(population_best)

        # Normalize population score to be between 0 and 1.
        population_score = [
            (item, score / len(target)) for item, score in population_score
        ]

        # This is selection
        for i in range(N_SELECTED):
            population.extend(select(population_score[int(i)], population_score, genes))
            # Check if the population has already reached the maximum value and if so,
            # break the cycle. If this check is disabled, the algorithm will take
            # forever to compute large strings, but will also calculate small strings in
            # a far fewer generations.
            if len(population) > N_POPULATION:
                break
if __name__ == "__main__":
    target_str = (
        "This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"
    )
    genes_list = list(
        " ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"
        "nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\"
    )
    generation, population, target = basic(target_str, genes_list)
print(
F"""\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}"""
)
| 98 |
'''simple docstring'''
def bubble_sort(list_data: list, length: int = 0) -> list:
    length = length or len(list_data)
    swapped = False
    for i in range(length - 1):
        if list_data[i] > list_data[i + 1]:
            list_data[i], list_data[i + 1] = list_data[i + 1], list_data[i]
            swapped = True

    return list_data if not swapped else bubble_sort(list_data, length - 1)
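
# Worked example: bubble_sort([4, 1, 3, 2]) -> [1, 2, 3, 4]; the recursion stops
# as soon as a full pass completes without any swaps.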
if __name__ == "__main__":
import doctest
doctest.testmod()
| 319 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
    "configuration_gpt_bigcode": ["GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTBigCodeConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_bigcode"] = [
        "GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GPTBigCodeForSequenceClassification",
        "GPTBigCodeForTokenClassification",
        "GPTBigCodeForCausalLM",
        "GPTBigCodeModel",
        "GPTBigCodePreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_bigcode import (
GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTBigCodeForCausalLM,
GPTBigCodeForSequenceClassification,
GPTBigCodeForTokenClassification,
GPTBigCodeModel,
GPTBigCodePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 99 |
'''simple docstring'''
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

rename_keys_prefix = [
('''bert.bert''', '''visual_bert'''),
('''bert.cls''', '''cls'''),
('''bert.classifier''', '''cls'''),
('''token_type_embeddings_visual''', '''visual_token_type_embeddings'''),
('''position_embeddings_visual''', '''visual_position_embeddings'''),
('''projection''', '''visual_projection'''),
]
ACCEPTABLE_CHECKPOINTS = [
'''nlvr2_coco_pre_trained.th''',
'''nlvr2_fine_tuned.th''',
'''nlvr2_pre_trained.th''',
'''vcr_coco_pre_train.th''',
'''vcr_fine_tune.th''',
'''vcr_pre_train.th''',
'''vqa_coco_pre_trained.th''',
'''vqa_fine_tuned.th''',
'''vqa_pre_trained.th''',
]
def load_state_dict(checkpoint_path):
    sd = torch.load(checkpoint_path, map_location="cpu")
    return sd
def get_new_dict(d, config, rename_keys_prefix=rename_keys_prefix):
    new_d = OrderedDict()
    new_d["visual_bert.embeddings.position_ids"] = torch.arange(config.max_position_embeddings).expand((1, -1))
    # detector_d = OrderedDict()
    for key in d:
        if "detector" in key:
            # detector_d[key.replace('detector.','')] = d[key]
            continue
        new_key = key
        for name_pair in rename_keys_prefix:
            new_key = new_key.replace(name_pair[0], name_pair[1])
        new_d[new_key] = d[key]
        if key == "bert.cls.predictions.decoder.weight":
            # Old bert code didn't have `decoder.bias`, but was added separately
            new_d["cls.predictions.decoder.bias"] = new_d["cls.predictions.bias"]
    return new_d
@torch.no_grad()
def convert_visual_bert_checkpoint(checkpoint_path, pytorch_dump_folder_path):
    assert (
        checkpoint_path.split("/")[-1] in ACCEPTABLE_CHECKPOINTS
    ), f"The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}."

    # Get Config
    if "pre" in checkpoint_path:
        model_type = "pretraining"
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 512}
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
        elif "nlvr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 1024}
        else:
            raise NotImplementedError(f"No implementation found for `{checkpoint_path}`.")
    else:
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 512}
            model_type = "multichoice"
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
            model_type = "vqa_advanced"
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048, "num_labels": 3129}
            model_type = "vqa"
        elif "nlvr" in checkpoint_path:
            config_params = {
                "visual_embedding_dim": 1024,
                "num_labels": 2,
            }
            model_type = "nlvr"

    config = VisualBertConfig(**config_params)

    # Load State Dict
    state_dict = load_state_dict(checkpoint_path)
    new_state_dict = get_new_dict(state_dict, config)

    if model_type == "pretraining":
        model = VisualBertForPreTraining(config)
    elif model_type == "vqa":
        model = VisualBertForQuestionAnswering(config)
    elif model_type == "nlvr":
        model = VisualBertForVisualReasoning(config)
    elif model_type == "multichoice":
        model = VisualBertForMultipleChoice(config)

    model.load_state_dict(new_state_dict)
    # Save Checkpoints
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''orig_checkpoint_path''', type=str, help='''A path to .th on local filesystem.''')
parser.add_argument('''pytorch_dump_folder_path''', type=str, help='''Path to the output PyTorch model.''')
    args = parser.parse_args()
convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
| 319 | 0 |
"""simple docstring"""
__magic_name__ = "0.21.0"
from .accelerator import Accelerator
from .big_modeling import (
cpu_offload,
cpu_offload_with_hook,
disk_offload,
dispatch_model,
init_empty_weights,
init_on_device,
load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
FullyShardedDataParallelPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
find_executable_batch_size,
infer_auto_device_map,
is_rich_available,
load_checkpoint_in_model,
synchronize_rng_states,
)
if is_rich_available():
from .utils import rich
| 100 |
'''simple docstring'''
from itertools import permutations
def is_substring_divisible(num: tuple) -> bool:
    if num[3] % 2 != 0:
        return False

    if (num[2] + num[3] + num[4]) % 3 != 0:
        return False

    if num[5] % 5 != 0:
        return False

    tests = [7, 11, 13, 17]
    for i, test in enumerate(tests):
        if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
            return False
    return True
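
# Worked example: the pandigital number 1406357289 passes every check above:
# 406 % 2 == 0, 63 % 3 == 0, 635 % 5 == 0, 357 % 7 == 0, 572 % 11 == 0,
# 728 % 13 == 0 and 289 % 17 == 0.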
def solution(n: int = 10) -> int:
    return sum(
        int("".join(map(str, num)))
        for num in permutations(range(n))
        if is_substring_divisible(num)
    )
if __name__ == "__main__":
print(f'{solution() = }')
| 319 | 0 |
from __future__ import annotations
def binary_search(a_list: list[int], item: int) -> bool:
    if len(a_list) == 0:
        return False
    midpoint = len(a_list) // 2
    if a_list[midpoint] == item:
        return True
    if item < a_list[midpoint]:
        return binary_search(a_list[:midpoint], item)
    else:
        return binary_search(a_list[midpoint + 1 :], item)
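
# Worked example: binary_search([1, 3, 5, 7], 5) -> True. The input list must
# already be sorted in ascending order, since each step halves the search range.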
if __name__ == "__main__":
    user_input = input("Enter numbers separated by comma:\n").strip()
    sequence = [int(item.strip()) for item in user_input.split(",")]
    target = int(input("Enter the number to be found in the list:\n").strip())
    not_str = "" if binary_search(sequence, target) else "not "
print(F'{target} was {not_str}found in {sequence}')
| 101 |
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''allenai/longformer-base-4096''': '''https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json''',
'''allenai/longformer-large-4096''': (
'''https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json'''
),
'''allenai/longformer-large-4096-finetuned-triviaqa''': (
'''https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json'''
),
'''allenai/longformer-base-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json'''
),
'''allenai/longformer-large-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json'''
),
},
'''merges_file''': {
'''allenai/longformer-base-4096''': '''https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt''',
'''allenai/longformer-large-4096''': (
'''https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt'''
),
'''allenai/longformer-large-4096-finetuned-triviaqa''': (
'''https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt'''
),
'''allenai/longformer-base-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt'''
),
'''allenai/longformer-large-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''allenai/longformer-base-4096''': 4096,
'''allenai/longformer-large-4096''': 4096,
'''allenai/longformer-large-4096-finetuned-triviaqa''': 4096,
'''allenai/longformer-base-4096-extra.pos.embd.only''': 4096,
'''allenai/longformer-large-4096-extra.pos.embd.only''': 4096,
}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode():
    """Returns a mapping from utf-8 bytes to printable unicode strings."""
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))
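
# Worked example: the map sends non-printable bytes to code points above 255,
# e.g. the space byte 32 becomes "Ġ" (U+0120), which is why byte-level BPE
# renders " world" as "Ġworld" before merges are applied.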
def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
class LongformerTokenizer(PreTrainedTokenizer):
    """
    Constructs a Longformer tokenizer, derived from the GPT-2 tokenizer, using byte-level Byte-Pair-Encoding.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""")

    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        """Tokenize a string."""
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        """Converts a token (str) in an id using the vocab."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Converts an index (integer) in a token (str) using the vocab."""
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) in a single string."""
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)
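
# Usage sketch (illustrative; requires real vocab.json/merges.txt files):
#   tokenizer = LongformerTokenizer(vocab_file="vocab.json", merges_file="merges.txt")
#   tokenizer.tokenize("Hello world")  # -> byte-level BPE tokens such as ["Hello", "Ġworld"]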
| 319 | 0 |
"""simple docstring"""
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
SCREAMING_SNAKE_CASE : Union[str, Any] = """\
"""
SCREAMING_SNAKE_CASE : Any = """
Perplexity (PPL) is one of the most common metrics for evaluating language models.
It is defined as the exponentiated average negative log-likelihood of a sequence.
For more information, see https://huggingface.co/docs/transformers/perplexity
"""
SCREAMING_SNAKE_CASE : Dict = """
Args:
model_id (str): model used for calculating Perplexity
NOTE: Perplexity can only be calculated for causal language models.
This includes models such as gpt2, causal variations of bert,
causal versions of t5, and more (the full list can be found
in the AutoModelForCausalLM documentation here:
https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )
input_texts (list of str): input text, each separate text snippet
is one list entry.
batch_size (int): the batch size to run texts through the model. Defaults to 16.
add_start_token (bool): whether to add the start token to the texts,
so the perplexity can include the probability of the first word. Defaults to True.
device (str): device to run on, defaults to 'cuda' when available
Returns:
perplexity: dictionary containing the perplexity scores for the texts
in the input list, as well as the mean perplexity. If one of the input texts is
longer than the max input length of the model, then it is truncated to the
max length for the perplexity computation.
Examples:
Example 1:
>>> perplexity = datasets.load_metric(\"perplexity\")
>>> input_texts = [\"lorem ipsum\", \"Happy Birthday!\", \"Bienvenue\"]
>>> results = perplexity.compute(model_id='gpt2',
... add_start_token=False,
... input_texts=input_texts) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
['perplexities', 'mean_perplexity']
>>> print(round(results[\"mean_perplexity\"], 2))
78.22
>>> print(round(results[\"perplexities\"][0], 2))
11.11
Example 2:
>>> perplexity = datasets.load_metric(\"perplexity\")
>>> input_texts = datasets.load_dataset(\"wikitext\",
... \"wikitext-2-raw-v1\",
... split=\"test\")[\"text\"][:50] # doctest:+ELLIPSIS
[...]
>>> input_texts = [s for s in input_texts if s!='']
>>> results = perplexity.compute(model_id='gpt2',
... input_texts=input_texts) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
['perplexities', 'mean_perplexity']
>>> print(round(results[\"mean_perplexity\"], 2))
60.35
>>> print(round(results[\"perplexities\"][0], 2))
81.12
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class _UpperCAmelCase ( datasets.Metric ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''input_texts''': datasets.Value('''string''' ),
} ) , reference_urls=['''https://huggingface.co/docs/transformers/perplexity'''] , )
def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ = 16 , a_ = True , a_=None ):
'''simple docstring'''
if device is not None:
assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu."
if device == "gpu":
__snake_case : Optional[Any] = '''cuda'''
else:
__snake_case : Tuple = '''cuda''' if torch.cuda.is_available() else '''cpu'''
__snake_case : int = AutoModelForCausalLM.from_pretrained(a_ )
__snake_case : Optional[int] = model.to(a_ )
__snake_case : Optional[int] = AutoTokenizer.from_pretrained(a_ )
# if batch_size > 1 (which generally leads to padding being required), and
# if there is not an already assigned pad_token, assign an existing
# special token to also be the padding token
if tokenizer.pad_token is None and batch_size > 1:
            existing_special_tokens = list(tokenizer.special_tokens_map_extended.values() )
            # check that the model already has at least one special token defined
            assert (
                len(existing_special_tokens ) > 0
            ), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
# assign one of the special tokens to also be the pad token
tokenizer.add_special_tokens({'''pad_token''': existing_special_tokens[0]} )
if add_start_token:
# leave room for <BOS> token to be added:
assert (
tokenizer.bos_token is not None
), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
__snake_case : List[Any] = model.config.max_length - 1
else:
__snake_case : Dict = model.config.max_length
__snake_case : Tuple = tokenizer(
a_ , add_special_tokens=a_ , padding=a_ , truncation=a_ , max_length=a_ , return_tensors='''pt''' , return_attention_mask=a_ , ).to(a_ )
__snake_case : List[Any] = encodings['''input_ids''']
__snake_case : str = encodings['''attention_mask''']
# check that each input is long enough:
if add_start_token:
assert torch.all(torch.ge(attn_masks.sum(1 ) , 1 ) ), "Each input text must be at least one token long."
else:
assert torch.all(
torch.ge(attn_masks.sum(1 ) , 2 ) ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."
__snake_case : Union[str, Any] = []
__snake_case : str = CrossEntropyLoss(reduction='''none''' )
for start_index in logging.tqdm(range(0 , len(a_ ) , a_ ) ):
__snake_case : Optional[int] = min(start_index + batch_size , len(a_ ) )
__snake_case : int = encoded_texts[start_index:end_index]
__snake_case : Optional[int] = attn_masks[start_index:end_index]
if add_start_token:
__snake_case : List[Any] = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0 ) ).to(a_ )
__snake_case : Union[str, Any] = torch.cat([bos_tokens_tensor, encoded_batch] , dim=1 )
__snake_case : int = torch.cat(
                    [torch.ones(bos_tokens_tensor.size() , dtype=torch.int64 ).to(a_ ), attn_mask] , dim=1 )
__snake_case : List[Any] = encoded_batch
with torch.no_grad():
__snake_case : List[str] = model(a_ , attention_mask=a_ ).logits
__snake_case : List[str] = out_logits[..., :-1, :].contiguous()
__snake_case : int = labels[..., 1:].contiguous()
__snake_case : int = attn_mask[..., 1:].contiguous()
            __snake_case : Tuple = torch.exp(
(loss_fct(shift_logits.transpose(1 , 2 ) , a_ ) * shift_attention_mask_batch).sum(1 )
/ shift_attention_mask_batch.sum(1 ) )
ppls += perplexity_batch.tolist()
return {"perplexities": ppls, "mean_perplexity": np.mean(a_ )}
| 102 |
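The metric above boils down to a shift-and-mask cross-entropy followed by an exponential. A minimal sketch of that core step, assuming `gpt2` purely as a convenient example checkpoint (the `add_start_token` path is omitted for brevity):

```python
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer

model = AutoModelForCausalLM.from_pretrained("gpt2")
tokenizer = AutoTokenizer.from_pretrained("gpt2")
tokenizer.pad_token = tokenizer.eos_token  # gpt2 has no pad token by default

enc = tokenizer(
    ["Hello world", "Perplexity is the exponentiated mean NLL"],
    return_tensors="pt",
    padding=True,
)
with torch.no_grad():
    logits = model(enc.input_ids, attention_mask=enc.attention_mask).logits

# Shift so that position t predicts token t+1, then mask out padding.
shift_logits = logits[..., :-1, :].contiguous()
shift_labels = enc.input_ids[..., 1:].contiguous()
shift_mask = enc.attention_mask[..., 1:].contiguous()

loss_fct = CrossEntropyLoss(reduction="none")
nll = loss_fct(shift_logits.transpose(1, 2), shift_labels) * shift_mask
ppl = torch.exp(nll.sum(1) / shift_mask.sum(1))
print(ppl.tolist())
```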
'''simple docstring'''
def remove_digit(num: int) -> int:
    """Return the largest value obtainable by deleting one digit from ``num``.

    >>> remove_digit(152)
    52
    >>> remove_digit(6385)
    685
    """
    if not isinstance(num, int):
        raise TypeError('''only integers accepted as input''')
    num_str = str(abs(num))
    num_transpositions = [list(num_str) for _ in range(len(num_str))]
    for index in range(len(num_str)):
        num_transpositions[index].pop(index)
    return max(
        int(''.join(transposition)) for transposition in num_transpositions)
if __name__ == "__main__":
__import__('''doctest''').testmod()
| 319 | 0 |
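`remove_digit` builds one digit-list copy per position; an equivalent, more compact string-slicing formulation (the `remove_digit_compact` name is ours), with a couple of sanity checks:

```python
def remove_digit_compact(num: int) -> int:
    # Same result as remove_digit above, via string slicing instead of
    # per-index copies of the digit list.
    digits = str(abs(num))
    return max(int(digits[:i] + digits[i + 1 :]) for i in range(len(digits)))

assert remove_digit_compact(152) == 52
assert remove_digit_compact(6385) == 685
assert remove_digit_compact(-11) == 1
```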
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...modeling_outputs import (
BackboneOutput,
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import (
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from ...utils.backbone_utils import BackboneMixin
from .configuration_resnet import ResNetConfig
A__ : str = logging.get_logger(__name__)
# General docstring
A__ : List[str] = '''ResNetConfig'''
# Base docstring
A__ : Tuple = '''microsoft/resnet-50'''
A__ : List[str] = [1, 2048, 7, 7]
# Image classification docstring
A__ : str = '''microsoft/resnet-50'''
A__ : List[str] = '''tiger cat'''
A__ : Dict = [
'''microsoft/resnet-50''',
# See all resnet models at https://huggingface.co/models?filter=resnet
]
class __snake_case ( nn.Module ):
def __init__( self : int , A_ : int , A_ : int , A_ : int = 3 , A_ : int = 1 , A_ : str = "relu"):
super().__init__()
        lowerCAmelCase_ : Optional[Any] = nn.Conv2d(
            A_ , A_ , kernel_size=A_ , stride=A_ , padding=kernel_size // 2 , bias=A_)
        lowerCAmelCase_ : Any = nn.BatchNorm2d(A_)
        lowerCAmelCase_ : Optional[int] = ACT2FN[activation] if activation is not None else nn.Identity()
def UpperCAmelCase__ ( self : Union[str, Any] , A_ : Tensor):
lowerCAmelCase_ : Tuple = self.convolution(A_)
lowerCAmelCase_ : str = self.normalization(A_)
lowerCAmelCase_ : Tuple = self.activation(A_)
return hidden_state
class __snake_case ( nn.Module ):
def __init__( self : Optional[Any] , A_ : ResNetConfig):
super().__init__()
lowerCAmelCase_ : Union[str, Any] = ResNetConvLayer(
config.num_channels , config.embedding_size , kernel_size=7 , stride=2 , activation=config.hidden_act)
        lowerCAmelCase_ : Any = nn.MaxPool2d(kernel_size=3 , stride=2 , padding=1)
lowerCAmelCase_ : Dict = config.num_channels
def UpperCAmelCase__ ( self : Any , A_ : Tensor):
lowerCAmelCase_ : Tuple = pixel_values.shape[1]
if num_channels != self.num_channels:
raise ValueError(
'''Make sure that the channel dimension of the pixel values match with the one set in the configuration.''')
lowerCAmelCase_ : int = self.embedder(A_)
lowerCAmelCase_ : List[str] = self.pooler(A_)
return embedding
class __snake_case ( nn.Module ):
def __init__( self : List[str] , A_ : int , A_ : int , A_ : int = 2):
super().__init__()
        lowerCAmelCase_ : Optional[Any] = nn.Conv2d(A_ , A_ , kernel_size=1 , stride=A_ , bias=A_)
        lowerCAmelCase_ : int = nn.BatchNorm2d(A_)
def UpperCAmelCase__ ( self : Any , A_ : Tensor):
lowerCAmelCase_ : str = self.convolution(A_)
lowerCAmelCase_ : Optional[int] = self.normalization(A_)
return hidden_state
class __snake_case ( nn.Module ):
def __init__( self : int , A_ : int , A_ : int , A_ : int = 1 , A_ : str = "relu"):
super().__init__()
lowerCAmelCase_ : Tuple = in_channels != out_channels or stride != 1
lowerCAmelCase_ : str = (
ResNetShortCut(A_ , A_ , stride=A_) if should_apply_shortcut else nn.Identity()
)
lowerCAmelCase_ : str = nn.Sequential(
ResNetConvLayer(A_ , A_ , stride=A_) , ResNetConvLayer(A_ , A_ , activation=A_) , )
        lowerCAmelCase_ : List[Any] = ACT2FN[activation]
def UpperCAmelCase__ ( self : Union[str, Any] , A_ : Dict):
lowerCAmelCase_ : Optional[int] = hidden_state
lowerCAmelCase_ : Any = self.layer(A_)
lowerCAmelCase_ : Optional[int] = self.shortcut(A_)
hidden_state += residual
lowerCAmelCase_ : Tuple = self.activation(A_)
return hidden_state
class __snake_case ( nn.Module ):
def __init__( self : Optional[int] , A_ : int , A_ : int , A_ : int = 1 , A_ : str = "relu" , A_ : int = 4):
super().__init__()
lowerCAmelCase_ : Union[str, Any] = in_channels != out_channels or stride != 1
lowerCAmelCase_ : List[str] = out_channels // reduction
lowerCAmelCase_ : Union[str, Any] = (
ResNetShortCut(A_ , A_ , stride=A_) if should_apply_shortcut else nn.Identity()
)
lowerCAmelCase_ : List[Any] = nn.Sequential(
ResNetConvLayer(A_ , A_ , kernel_size=1) , ResNetConvLayer(A_ , A_ , stride=A_) , ResNetConvLayer(A_ , A_ , kernel_size=1 , activation=A_) , )
        lowerCAmelCase_ : int = ACT2FN[activation]
def UpperCAmelCase__ ( self : Union[str, Any] , A_ : Dict):
lowerCAmelCase_ : List[str] = hidden_state
lowerCAmelCase_ : Optional[Any] = self.layer(A_)
lowerCAmelCase_ : Optional[Any] = self.shortcut(A_)
hidden_state += residual
lowerCAmelCase_ : Dict = self.activation(A_)
return hidden_state
class __snake_case ( nn.Module ):
def __init__( self : int , A_ : ResNetConfig , A_ : int , A_ : int , A_ : int = 2 , A_ : int = 2 , ):
super().__init__()
lowerCAmelCase_ : List[str] = ResNetBottleNeckLayer if config.layer_type == '''bottleneck''' else ResNetBasicLayer
lowerCAmelCase_ : int = nn.Sequential(
# downsampling is done in the first layer with stride of 2
layer(A_ , A_ , stride=A_ , activation=config.hidden_act) , *[layer(A_ , A_ , activation=config.hidden_act) for _ in range(depth - 1)] , )
def UpperCAmelCase__ ( self : Tuple , A_ : Tensor):
lowerCAmelCase_ : Union[str, Any] = input
for layer in self.layers:
lowerCAmelCase_ : List[str] = layer(A_)
return hidden_state
class __snake_case ( nn.Module ):
def __init__( self : List[str] , A_ : ResNetConfig):
super().__init__()
lowerCAmelCase_ : Union[str, Any] = nn.ModuleList([])
# based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input
self.stages.append(
ResNetStage(
A_ , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ))
lowerCAmelCase_ : Tuple = zip(config.hidden_sizes , config.hidden_sizes[1:])
for (in_channels, out_channels), depth in zip(A_ , config.depths[1:]):
self.stages.append(ResNetStage(A_ , A_ , A_ , depth=A_))
def UpperCAmelCase__ ( self : Union[str, Any] , A_ : Tensor , A_ : bool = False , A_ : bool = True):
lowerCAmelCase_ : Union[str, Any] = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
lowerCAmelCase_ : int = hidden_states + (hidden_state,)
lowerCAmelCase_ : int = stage_module(A_)
if output_hidden_states:
lowerCAmelCase_ : List[Any] = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None)
return BaseModelOutputWithNoAttention(
last_hidden_state=A_ , hidden_states=A_ , )
class __snake_case ( UpperCamelCase_ ):
_a = ResNetConfig
_a = '''resnet'''
_a = '''pixel_values'''
_a = True
def UpperCAmelCase__ ( self : Optional[Any] , A_ : Any):
        if isinstance(A_ , nn.Conv2d):
nn.init.kaiming_normal_(module.weight , mode='''fan_out''' , nonlinearity='''relu''')
        elif isinstance(A_ , (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.constant_(module.weight , 1)
nn.init.constant_(module.bias , 0)
def UpperCAmelCase__ ( self : str , A_ : int , A_ : Union[str, Any]=False):
if isinstance(A_ , A_):
lowerCAmelCase_ : Union[str, Any] = value
A__ : Any = R'''
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`ResNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
'''
A__ : Union[str, Any] = R'''
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`ConvNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
'''
@add_start_docstrings(
'''The bare ResNet model outputting raw features without any specific head on top.''' ,UpperCamelCase_ ,)
class __snake_case ( UpperCamelCase_ ):
def __init__( self : Any , A_ : Any):
super().__init__(A_)
lowerCAmelCase_ : str = config
lowerCAmelCase_ : Union[str, Any] = ResNetEmbeddings(A_)
lowerCAmelCase_ : Dict = ResNetEncoder(A_)
        lowerCAmelCase_ : Any = nn.AdaptiveAvgPool2d((1, 1))
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(A_)
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=A_ , config_class=_CONFIG_FOR_DOC , modality='''vision''' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def UpperCAmelCase__ ( self : Optional[int] , A_ : Tensor , A_ : Optional[bool] = None , A_ : Optional[bool] = None):
lowerCAmelCase_ : Optional[int] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowerCAmelCase_ : Optional[int] = return_dict if return_dict is not None else self.config.use_return_dict
lowerCAmelCase_ : Tuple = self.embedder(A_)
lowerCAmelCase_ : Tuple = self.encoder(
A_ , output_hidden_states=A_ , return_dict=A_)
lowerCAmelCase_ : List[Any] = encoder_outputs[0]
lowerCAmelCase_ : str = self.pooler(A_)
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=A_ , pooler_output=A_ , hidden_states=encoder_outputs.hidden_states , )
@add_start_docstrings(
'''
ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
ImageNet.
''' ,UpperCamelCase_ ,)
class __snake_case ( UpperCamelCase_ ):
def __init__( self : Dict , A_ : List[str]):
super().__init__(A_)
lowerCAmelCase_ : Optional[Any] = config.num_labels
lowerCAmelCase_ : Union[str, Any] = ResNetModel(A_)
# classification head
lowerCAmelCase_ : List[str] = nn.Sequential(
nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels) if config.num_labels > 0 else nn.Identity() , )
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(A_)
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=A_ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def UpperCAmelCase__ ( self : int , A_ : Optional[torch.FloatTensor] = None , A_ : Optional[torch.LongTensor] = None , A_ : Optional[bool] = None , A_ : Optional[bool] = None , ):
lowerCAmelCase_ : Dict = return_dict if return_dict is not None else self.config.use_return_dict
lowerCAmelCase_ : Dict = self.resnet(A_ , output_hidden_states=A_ , return_dict=A_)
lowerCAmelCase_ : Optional[Any] = outputs.pooler_output if return_dict else outputs[1]
lowerCAmelCase_ : List[str] = self.classifier(A_)
lowerCAmelCase_ : List[str] = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
lowerCAmelCase_ : List[Any] = '''regression'''
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
lowerCAmelCase_ : Optional[Any] = '''single_label_classification'''
else:
lowerCAmelCase_ : str = '''multi_label_classification'''
if self.config.problem_type == "regression":
lowerCAmelCase_ : List[str] = MSELoss()
if self.num_labels == 1:
lowerCAmelCase_ : List[Any] = loss_fct(logits.squeeze() , labels.squeeze())
else:
lowerCAmelCase_ : Dict = loss_fct(A_ , A_)
elif self.config.problem_type == "single_label_classification":
lowerCAmelCase_ : int = CrossEntropyLoss()
lowerCAmelCase_ : Optional[int] = loss_fct(logits.view(-1 , self.num_labels) , labels.view(-1))
elif self.config.problem_type == "multi_label_classification":
lowerCAmelCase_ : List[str] = BCEWithLogitsLoss()
lowerCAmelCase_ : Tuple = loss_fct(A_ , A_)
if not return_dict:
lowerCAmelCase_ : int = (logits,) + outputs[2:]
return (loss,) + output if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=A_ , logits=A_ , hidden_states=outputs.hidden_states)
@add_start_docstrings(
'''
ResNet backbone, to be used with frameworks like DETR and MaskFormer.
''' ,UpperCamelCase_ ,)
class __snake_case ( UpperCamelCase_ ,UpperCamelCase_ ):
def __init__( self : Dict , A_ : int):
super().__init__(A_)
super()._init_backbone(A_)
lowerCAmelCase_ : List[Any] = [config.embedding_size] + config.hidden_sizes
lowerCAmelCase_ : Dict = ResNetEmbeddings(A_)
lowerCAmelCase_ : Any = ResNetEncoder(A_)
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(A_)
@replace_return_docstrings(output_type=A_ , config_class=_CONFIG_FOR_DOC)
def UpperCAmelCase__ ( self : List[str] , A_ : Tensor , A_ : Optional[bool] = None , A_ : Optional[bool] = None):
lowerCAmelCase_ : Optional[Any] = return_dict if return_dict is not None else self.config.use_return_dict
lowerCAmelCase_ : Optional[int] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowerCAmelCase_ : Any = self.embedder(A_)
lowerCAmelCase_ : str = self.encoder(A_ , output_hidden_states=A_ , return_dict=A_)
lowerCAmelCase_ : Union[str, Any] = outputs.hidden_states
lowerCAmelCase_ : List[Any] = ()
for idx, stage in enumerate(self.stage_names):
if stage in self.out_features:
feature_maps += (hidden_states[idx],)
if not return_dict:
lowerCAmelCase_ : Any = (feature_maps,)
if output_hidden_states:
output += (outputs.hidden_states,)
return output
return BackboneOutput(
feature_maps=A_ , hidden_states=outputs.hidden_states if output_hidden_states else None , attentions=A_ , )
| 103 |
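A short usage sketch for the classification head defined above, using the `microsoft/resnet-50` checkpoint referenced in its docstrings. The random tensor stands in for a preprocessed image; a real pipeline would go through `AutoImageProcessor` first, and `from_pretrained` downloads weights:

```python
import torch
from transformers import ResNetForImageClassification

model = ResNetForImageClassification.from_pretrained("microsoft/resnet-50")
model.eval()

# Stand-in for a preprocessed image; a real pipeline would run PIL images
# through AutoImageProcessor.from_pretrained("microsoft/resnet-50") first.
pixel_values = torch.randn(1, 3, 224, 224)
with torch.no_grad():
    logits = model(pixel_values).logits
print(logits.shape)  # torch.Size([1, 1000])
print(model.config.id2label[logits.argmax(-1).item()])
```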
'''simple docstring'''
from __future__ import annotations
import math
def default_matrix_multiplication(a: list, b: list) -> list:
    if len(a) != 2 or len(a[0]) != 2 or len(b) != 2 or len(b[0]) != 2:
        raise Exception('''Matrices are not 2x2''')
    new_matrix = [
        [a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]],
        [a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]],
    ]
    return new_matrix
def matrix_addition(matrix_a: list, matrix_b: list) -> list:
    return [
        [matrix_a[row][col] + matrix_b[row][col] for col in range(len(matrix_a[row]))]
        for row in range(len(matrix_a))
    ]
def matrix_subtraction(matrix_a: list, matrix_b: list) -> list:
    return [
        [matrix_a[row][col] - matrix_b[row][col] for col in range(len(matrix_a[row]))]
        for row in range(len(matrix_a))
    ]
def split_matrix(a: list) -> tuple[list, list, list, list]:
    if len(a) % 2 != 0 or len(a[0]) % 2 != 0:
        raise Exception('''Odd matrices are not supported!''')
    matrix_length = len(a)
    mid = matrix_length // 2
    top_right = [[a[i][j] for j in range(mid, matrix_length)] for i in range(mid)]
    bot_right = [
        [a[i][j] for j in range(mid, matrix_length)] for i in range(mid, matrix_length)
    ]
    top_left = [[a[i][j] for j in range(mid)] for i in range(mid)]
    bot_left = [[a[i][j] for j in range(mid)] for i in range(mid, matrix_length)]
    return top_left, top_right, bot_left, bot_right
def matrix_dimensions(matrix: list) -> tuple[int, int]:
    return len(matrix), len(matrix[0])
def print_matrix(matrix: list) -> None:
    print('''\n'''.join(str(line) for line in matrix))
def actual_strassen(matrix_a: list, matrix_b: list) -> list:
    if matrix_dimensions(matrix_a) == (2, 2):
        return default_matrix_multiplication(matrix_a, matrix_b)
    a, b, c, d = split_matrix(matrix_a)
    e, f, g, h = split_matrix(matrix_b)
    t1 = actual_strassen(a, matrix_subtraction(f, h))
    t2 = actual_strassen(matrix_addition(a, b), h)
    t3 = actual_strassen(matrix_addition(c, d), e)
    t4 = actual_strassen(d, matrix_subtraction(g, e))
    t5 = actual_strassen(matrix_addition(a, d), matrix_addition(e, h))
    t6 = actual_strassen(matrix_subtraction(b, d), matrix_addition(g, h))
    t7 = actual_strassen(matrix_subtraction(a, c), matrix_addition(e, f))
    top_left = matrix_addition(matrix_subtraction(matrix_addition(t5, t4), t2), t6)
    top_right = matrix_addition(t1, t2)
    bot_left = matrix_addition(t3, t4)
    bot_right = matrix_subtraction(matrix_subtraction(matrix_addition(t1, t5), t3), t7)
    # construct the new matrix from our 4 quadrants
    new_matrix = []
    for i in range(len(top_right)):
        new_matrix.append(top_left[i] + top_right[i])
    for i in range(len(bot_right)):
        new_matrix.append(bot_left[i] + bot_right[i])
    return new_matrix
def strassen(matrix1: list, matrix2: list) -> list:
    if matrix_dimensions(matrix1)[1] != matrix_dimensions(matrix2)[0]:
        msg = (
            '''Unable to multiply these matrices, please check the dimensions.\n'''
            f"""Matrix A: {matrix1}\n"""
            f"""Matrix B: {matrix2}"""
        )
        raise Exception(msg)
    dimension1 = matrix_dimensions(matrix1)
    dimension2 = matrix_dimensions(matrix2)
    if dimension1[0] == dimension1[1] and dimension2[0] == dimension2[1]:
        return [matrix1, matrix2]
    maximum = max(*dimension1, *dimension2)
    maxim = int(math.pow(2, math.ceil(math.log2(maximum))))
    new_matrix1 = matrix1
    new_matrix2 = matrix2
    # Adding zeros to the matrices so that the arrays dimensions are the same and also
    # power of 2
    for i in range(0, maxim):
        if i < dimension1[0]:
            for _ in range(dimension1[1], maxim):
                new_matrix1[i].append(0)
        else:
            new_matrix1.append([0] * maxim)
        if i < dimension2[0]:
            for _ in range(dimension2[1], maxim):
                new_matrix2[i].append(0)
        else:
            new_matrix2.append([0] * maxim)
    final_matrix = actual_strassen(new_matrix1, new_matrix2)
    # Removing the additional zeros
    for i in range(0, maxim):
        if i < dimension1[0]:
            for _ in range(dimension1[1], maxim):
                final_matrix[i].pop()
        else:
            final_matrix.pop()
    return final_matrix
if __name__ == "__main__":
    matrix1 = [
        [2, 3, 4, 5],
        [6, 4, 3, 1],
        [2, 3, 6, 7],
        [3, 1, 2, 4],
        [2, 3, 4, 5],
        [6, 4, 3, 1],
        [2, 3, 6, 7],
        [3, 1, 2, 4],
        [2, 3, 4, 5],
        [6, 2, 3, 1],
    ]
    matrix2 = [[0, 2, 1, 1], [16, 2, 3, 3], [2, 2, 7, 7], [13, 11, 22, 4]]
    print(strassen(matrix1, matrix2))
| 319 | 0 |
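As a sanity check on Strassen's recombination formulas, here is the 2x2 base case written out with its seven explicit products and compared against a naive triple-loop multiply (the `p1`..`p7` naming is ours, not the file's):

```python
def naive_matmul(a, b):
    # Straightforward O(n^3) product for reference.
    return [
        [sum(a[i][k] * b[k][j] for k in range(len(b))) for j in range(len(b[0]))]
        for i in range(len(a))
    ]

def strassen_2x2(a, b):
    # Strassen's seven products for the 2x2 base case.
    p1 = a[0][0] * (b[0][1] - b[1][1])
    p2 = (a[0][0] + a[0][1]) * b[1][1]
    p3 = (a[1][0] + a[1][1]) * b[0][0]
    p4 = a[1][1] * (b[1][0] - b[0][0])
    p5 = (a[0][0] + a[1][1]) * (b[0][0] + b[1][1])
    p6 = (a[0][1] - a[1][1]) * (b[1][0] + b[1][1])
    p7 = (a[0][0] - a[1][0]) * (b[0][0] + b[0][1])
    return [
        [p5 + p4 - p2 + p6, p1 + p2],
        [p3 + p4, p1 + p5 - p3 - p7],
    ]

a = [[1, 2], [3, 4]]
b = [[5, 6], [7, 8]]
assert strassen_2x2(a, b) == naive_matmul(a, b) == [[19, 22], [43, 50]]
```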
'''simple docstring'''
import os
import re
import warnings
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
if TYPE_CHECKING:
from ...tokenization_utils_base import TextInput
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {'''vocab_file''': '''spiece.model'''}
lowerCAmelCase__ = {
'''vocab_file''': {
'''t5-small''': '''https://huggingface.co/t5-small/resolve/main/spiece.model''',
'''t5-base''': '''https://huggingface.co/t5-base/resolve/main/spiece.model''',
'''t5-large''': '''https://huggingface.co/t5-large/resolve/main/spiece.model''',
'''t5-3b''': '''https://huggingface.co/t5-3b/resolve/main/spiece.model''',
'''t5-11b''': '''https://huggingface.co/t5-11b/resolve/main/spiece.model''',
}
}
# TODO(PVP) - this should be removed in Transformers v5
lowerCAmelCase__ = {
'''t5-small''': 512,
'''t5-base''': 512,
'''t5-large''': 512,
'''t5-3b''': 512,
'''t5-11b''': 512,
}
lowerCAmelCase__ = '''▁'''
class lowercase_ (lowerCamelCase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE : Optional[Any] = ['input_ids', 'attention_mask']
def __init__( self : Any ,lowercase__ : Any ,lowercase__ : Tuple="</s>" ,lowercase__ : Union[str, Any]="<unk>" ,lowercase__ : Union[str, Any]="<pad>" ,lowercase__ : int=1_0_0 ,lowercase__ : List[Any]=None ,lowercase__ : Optional[Dict[str, Any]] = None ,lowercase__ : int=True ,**lowercase__ : int ,):
# Add extra_ids to the special token list
if extra_ids > 0 and additional_special_tokens is None:
__lowercase = [F"<extra_id_{i}>" for i in range(lowercase__ )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra_id special tokens
__lowercase = len(set(filter(lambda lowercase__ : bool('''extra_id''' in str(lowercase__ ) ) ,lowercase__ ) ) )
if extra_tokens != extra_ids:
raise ValueError(
F"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
''' provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids'''
''' tokens''' )
if legacy:
logger.warning_once(
F"You are using the legacy behaviour of the {self.__class__}. This means that tokens that come after special tokens will not be properly handled. We recommend you to"
''' read the related pull request available at https://github.com/huggingface/transformers/pull/24565''' )
__lowercase = legacy
__lowercase = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=lowercase__ ,unk_token=lowercase__ ,pad_token=lowercase__ ,extra_ids=lowercase__ ,additional_special_tokens=lowercase__ ,sp_model_kwargs=self.sp_model_kwargs ,legacy=lowercase__ ,**lowercase__ ,)
__lowercase = vocab_file
__lowercase = extra_ids
__lowercase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(lowercase__ )
@staticmethod
def SCREAMING_SNAKE_CASE ( lowercase__ : Dict ,lowercase__ : Optional[Any] ,lowercase__ : Any ):
if pretrained_model_name_or_path in TaTokenizer.max_model_input_sizes:
__lowercase = TaTokenizer.max_model_input_sizes[pretrained_model_name_or_path]
if init_max_model_length is not None and init_max_model_length != max_model_length:
return init_max_model_length
elif init_max_model_length is None:
warnings.warn(
'''This tokenizer was incorrectly instantiated with a model max length of'''
F" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this"
''' behavior is kept to avoid breaking backwards compatibility when padding/encoding with'''
''' `truncation is True`.\n- Be aware that you SHOULD NOT rely on'''
F" {pretrained_model_name_or_path} automatically truncating your input to"
F" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences"
F" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with"
''' `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please'''
''' instantiate this tokenizer with `model_max_length` set to your preferred value.''' ,lowercase__ ,)
return max_model_length
@property
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
return self.sp_model.get_piece_size() + self._extra_ids
def SCREAMING_SNAKE_CASE ( self : Dict ):
__lowercase = {self.convert_ids_to_tokens(lowercase__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def SCREAMING_SNAKE_CASE ( self : Optional[int] ,lowercase__ : List[int] ,lowercase__ : Optional[List[int]] = None ,lowercase__ : bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowercase__ ,token_ids_a=lowercase__ ,already_has_special_tokens=lowercase__ )
# normal case: some special tokens
if token_ids_a is None:
return ([0] * len(lowercase__ )) + [1]
return ([0] * len(lowercase__ )) + [1] + ([0] * len(lowercase__ )) + [1]
def SCREAMING_SNAKE_CASE ( self : List[str] ):
return list(
set(filter(lambda lowercase__ : bool(re.search(r'''<extra_id_\d+>''' ,lowercase__ ) ) is not None ,self.additional_special_tokens ) ) )
def SCREAMING_SNAKE_CASE ( self : int ):
return [self._convert_token_to_id(lowercase__ ) for token in self.get_sentinel_tokens()]
def SCREAMING_SNAKE_CASE ( self : List[str] ,lowercase__ : List[int] ):
if len(lowercase__ ) > 0 and token_ids[-1] == self.eos_token_id:
warnings.warn(
F"This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated"
''' eos tokens being added.''' )
return token_ids
else:
return token_ids + [self.eos_token_id]
def SCREAMING_SNAKE_CASE ( self : Tuple ,lowercase__ : List[int] ,lowercase__ : Optional[List[int]] = None ):
__lowercase = [self.eos_token_id]
if token_ids_a is None:
return len(token_ids_a + eos ) * [0]
return len(token_ids_a + eos + token_ids_a + eos ) * [0]
def SCREAMING_SNAKE_CASE ( self : Optional[int] ,lowercase__ : List[int] ,lowercase__ : Optional[List[int]] = None ):
__lowercase = self._add_eos_if_not_present(lowercase__ )
if token_ids_a is None:
return token_ids_a
else:
__lowercase = self._add_eos_if_not_present(lowercase__ )
return token_ids_a + token_ids_a
def __getstate__( self : Optional[Any] ):
__lowercase = self.__dict__.copy()
__lowercase = None
return state
def __setstate__( self : Optional[int] ,lowercase__ : int ):
__lowercase = d
# for backward compatibility
if not hasattr(self ,'''sp_model_kwargs''' ):
__lowercase = {}
__lowercase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def SCREAMING_SNAKE_CASE ( self : int ,lowercase__ : "TextInput" ,**lowercase__ : Optional[int] ):
# Replace the SPIECE_UNDERLINE with a space to make sure SPIECE_UNDERLINE is only used at
# the beginning of the text
if not self.legacy:
__lowercase = SPIECE_UNDERLINE + text.replace(lowercase__ ,''' ''' )
return super().tokenize(lowercase__ ,**lowercase__ )
def SCREAMING_SNAKE_CASE ( self : List[str] ,lowercase__ : Optional[int] ,**lowercase__ : Any ):
if not self.legacy:
__lowercase = text.startswith(lowercase__ )
if is_first:
__lowercase = text[1:]
__lowercase = self.sp_model.encode(lowercase__ ,out_type=lowercase__ )
if not self.legacy and not is_first and not text.startswith(''' ''' ) and tokens[0].startswith(lowercase__ ):
__lowercase = ([tokens[0][1:]] if len(tokens[0] ) > 1 else []) + tokens[1:]
return tokens
def SCREAMING_SNAKE_CASE ( self : Dict ,lowercase__ : Dict ):
if token.startswith('''<extra_id_''' ):
__lowercase = re.match(r'''<extra_id_(\d+)>''' ,lowercase__ )
__lowercase = int(match.group(1 ) )
return self.vocab_size - num - 1
return self.sp_model.piece_to_id(lowercase__ )
def SCREAMING_SNAKE_CASE ( self : Dict ,lowercase__ : Optional[Any] ):
if index < self.sp_model.get_piece_size():
__lowercase = self.sp_model.IdToPiece(lowercase__ )
else:
__lowercase = F"<extra_id_{self.vocab_size - 1 - index}>"
return token
def SCREAMING_SNAKE_CASE ( self : Tuple ,lowercase__ : int ):
__lowercase = []
__lowercase = ''''''
__lowercase = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(lowercase__ ) + token
__lowercase = True
__lowercase = []
else:
current_sub_tokens.append(lowercase__ )
__lowercase = False
out_string += self.sp_model.decode(lowercase__ )
return out_string.strip()
def SCREAMING_SNAKE_CASE ( self : Dict ,lowercase__ : str ,lowercase__ : Optional[str] = None ):
if not os.path.isdir(lowercase__ ):
logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
return
__lowercase = os.path.join(
lowercase__ ,(filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowercase__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file ,lowercase__ )
elif not os.path.isfile(self.vocab_file ):
with open(lowercase__ ,'''wb''' ) as fi:
__lowercase = self.sp_model.serialized_model_proto()
fi.write(lowercase__ )
return (out_vocab_file,)
| 104 |
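The sentinel tokens `<extra_id_0>` ... occupy the top of the id space, so token/id conversion is pure arithmetic. A standalone sketch of the mapping used by the file above, assuming the stock T5 vocabulary size of 32100 (32000 SentencePiece pieces plus 100 extra ids):

```python
import re

VOCAB_SIZE = 32100  # assumed: 32000 SentencePiece pieces + 100 extra ids

def sentinel_token_to_id(token: str, vocab_size: int = VOCAB_SIZE) -> int:
    match = re.match(r"<extra_id_(\d+)>", token)
    if match is None:
        raise ValueError(f"not a sentinel token: {token!r}")
    # Sentinels count down from the top of the id space.
    return vocab_size - int(match.group(1)) - 1

def sentinel_id_to_token(index: int, vocab_size: int = VOCAB_SIZE) -> str:
    return f"<extra_id_{vocab_size - 1 - index}>"

assert sentinel_token_to_id("<extra_id_0>") == 32099
assert sentinel_id_to_token(32099) == "<extra_id_0>"
```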
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
class lowerCAmelCase_ ( UpperCAmelCase_ ):
'''simple docstring'''
UpperCamelCase_ : torch.FloatTensor
UpperCamelCase_ : torch.FloatTensor
UpperCamelCase_ : Optional[torch.FloatTensor] = None
class lowerCAmelCase_ ( UpperCAmelCase_ , UpperCAmelCase_ ):
'''simple docstring'''
UpperCamelCase_ : Tuple = 2
@register_to_config
def __init__( self : List[str] , SCREAMING_SNAKE_CASE_ : float = 0.02 , SCREAMING_SNAKE_CASE_ : float = 1_00 , SCREAMING_SNAKE_CASE_ : float = 1.007 , SCREAMING_SNAKE_CASE_ : float = 80 , SCREAMING_SNAKE_CASE_ : float = 0.05 , SCREAMING_SNAKE_CASE_ : float = 50 , ) -> Optional[int]:
'''simple docstring'''
A: Union[str, Any] = sigma_max
# setable values
A: int = None
A: np.IntTensor = None
A: torch.FloatTensor = None # sigma(t_i)
def _snake_case ( self : str , SCREAMING_SNAKE_CASE_ : torch.FloatTensor , SCREAMING_SNAKE_CASE_ : Optional[int] = None ) -> torch.FloatTensor:
'''simple docstring'''
return sample
def _snake_case ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Union[str, torch.device] = None ) -> Optional[Any]:
'''simple docstring'''
A: List[Any] = num_inference_steps
A: List[str] = np.arange(0 , self.num_inference_steps )[::-1].copy()
A: Any = torch.from_numpy(SCREAMING_SNAKE_CASE_ ).to(SCREAMING_SNAKE_CASE_ )
A: str = [
(
self.config.sigma_max**2
* (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
)
for i in self.timesteps
]
        A: Tuple = torch.tensor(SCREAMING_SNAKE_CASE_ , dtype=torch.float32 , device=SCREAMING_SNAKE_CASE_ )
def _snake_case ( self : List[Any] , SCREAMING_SNAKE_CASE_ : torch.FloatTensor , SCREAMING_SNAKE_CASE_ : float , SCREAMING_SNAKE_CASE_ : Optional[torch.Generator] = None ) -> Tuple[torch.FloatTensor, float]:
'''simple docstring'''
if self.config.s_min <= sigma <= self.config.s_max:
A: str = min(self.config.s_churn / self.num_inference_steps , 2**0.5 - 1 )
else:
A: List[str] = 0
# sample eps ~ N(0, S_noise^2 * I)
A: Optional[Any] = self.config.s_noise * randn_tensor(sample.shape , generator=SCREAMING_SNAKE_CASE_ ).to(sample.device )
A: Optional[Any] = sigma + gamma * sigma
A: List[Any] = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
return sample_hat, sigma_hat
def _snake_case ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : torch.FloatTensor , SCREAMING_SNAKE_CASE_ : float , SCREAMING_SNAKE_CASE_ : float , SCREAMING_SNAKE_CASE_ : torch.FloatTensor , SCREAMING_SNAKE_CASE_ : bool = True , ) -> Union[KarrasVeOutput, Tuple]:
'''simple docstring'''
A: Union[str, Any] = sample_hat + sigma_hat * model_output
A: str = (sample_hat - pred_original_sample) / sigma_hat
A: Optional[int] = sample_hat + (sigma_prev - sigma_hat) * derivative
if not return_dict:
return (sample_prev, derivative)
return KarrasVeOutput(
prev_sample=SCREAMING_SNAKE_CASE_ , derivative=SCREAMING_SNAKE_CASE_ , pred_original_sample=SCREAMING_SNAKE_CASE_ )
def _snake_case ( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : torch.FloatTensor , SCREAMING_SNAKE_CASE_ : float , SCREAMING_SNAKE_CASE_ : float , SCREAMING_SNAKE_CASE_ : torch.FloatTensor , SCREAMING_SNAKE_CASE_ : torch.FloatTensor , SCREAMING_SNAKE_CASE_ : torch.FloatTensor , SCREAMING_SNAKE_CASE_ : bool = True , ) -> Union[KarrasVeOutput, Tuple]:
'''simple docstring'''
A: int = sample_prev + sigma_prev * model_output
A: List[Any] = (sample_prev - pred_original_sample) / sigma_prev
A: Dict = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)
if not return_dict:
return (sample_prev, derivative)
return KarrasVeOutput(
prev_sample=SCREAMING_SNAKE_CASE_ , derivative=SCREAMING_SNAKE_CASE_ , pred_original_sample=SCREAMING_SNAKE_CASE_ )
def _snake_case ( self : List[str] , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : str ) -> Dict:
'''simple docstring'''
raise NotImplementedError()
| 319 | 0 |
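The scheduler above interpolates noise levels geometrically between `sigma_max` and `sigma_min`. A plain-NumPy recomputation of that schedule with the class's default config values (the class additionally stores it indexed per reversed timestep):

```python
import numpy as np

sigma_min, sigma_max, num_steps = 0.02, 100.0, 50  # defaults from the config above
i = np.arange(num_steps)
# Geometric interpolation from sigma_max**2 down to sigma_min**2.
schedule = sigma_max**2 * (sigma_min**2 / sigma_max**2) ** (i / (num_steps - 1))
print(schedule[0], schedule[-1])  # 10000.0 ... 0.0004
```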
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def _SCREAMING_SNAKE_CASE ( ) ->Dict:
'''simple docstring'''
a : Any = ArgumentParser("Accelerate CLI tool" , usage="accelerate <command> [<args>]" , allow_abbrev=_lowercase )
a : List[Any] = parser.add_subparsers(help="accelerate command helpers" )
# Register commands
get_config_parser(subparsers=_lowercase )
env_command_parser(subparsers=_lowercase )
launch_command_parser(subparsers=_lowercase )
tpu_command_parser(subparsers=_lowercase )
test_command_parser(subparsers=_lowercase )
# Let's go
a : List[Any] = parser.parse_args()
if not hasattr(_lowercase , "func" ):
parser.print_help()
exit(1 )
# Run
args.func(_lowercase )
if __name__ == "__main__":
main()
| 105 |
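The CLI above follows the common argparse pattern where each command module registers its own subparser and sets a `func` default that `main` later dispatches to. A generic, self-contained version of the same pattern with a hypothetical `hello` subcommand:

```python
from argparse import ArgumentParser

def hello_command_parser(subparsers):
    # Each command module contributes one subparser and a `func` default.
    parser = subparsers.add_parser("hello", help="print a greeting")
    parser.add_argument("--name", default="world")
    parser.set_defaults(func=lambda args: print(f"hello {args.name}"))
    return parser

def main():
    parser = ArgumentParser("demo CLI", usage="demo <command> [<args>]")
    subparsers = parser.add_subparsers(help="demo command helpers")
    hello_command_parser(subparsers)
    args = parser.parse_args()
    if not hasattr(args, "func"):
        # No subcommand given: show help instead of crashing on args.func.
        parser.print_help()
        raise SystemExit(1)
    args.func(args)

if __name__ == "__main__":
    main()
```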
'''simple docstring'''
import json
import logging
import math
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from datasets import Dataset, load_dataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_FOR_MASKED_LM_MAPPING,
AutoConfig,
AutoModelForMaskedLM,
AutoTokenizer,
DataCollatorForWholeWordMask,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
UpperCamelCase = logging.getLogger(__name__)
UpperCamelCase = list(MODEL_FOR_MASKED_LM_MAPPING.keys())
UpperCamelCase = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class lowerCAmelCase_ :
'''simple docstring'''
UpperCamelCase_ : Optional[str] = field(
default=UpperCAmelCase_ , metadata={
"""help""": (
"""The model checkpoint for weights initialization.Don't set if you want to train a model from scratch."""
)
} , )
UpperCamelCase_ : Optional[str] = field(
default=UpperCAmelCase_ , metadata={"""help""": """If training from scratch, pass a model type from the list: """ + """, """.join(UpperCAmelCase_ )} , )
UpperCamelCase_ : Optional[str] = field(
default=UpperCAmelCase_ , metadata={
"""help""": (
"""Override some existing default config settings when a model is trained from scratch. Example: """
"""n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"""
)
} , )
UpperCamelCase_ : Optional[str] = field(
default=UpperCAmelCase_ , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
UpperCamelCase_ : Optional[str] = field(
default=UpperCAmelCase_ , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
UpperCamelCase_ : Optional[str] = field(
default=UpperCAmelCase_ , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
UpperCamelCase_ : bool = field(
default=UpperCAmelCase_ , metadata={"""help""": """Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."""} , )
UpperCamelCase_ : str = field(
default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , )
UpperCamelCase_ : bool = field(
default=UpperCAmelCase_ , metadata={
"""help""": (
"""Will use the token generated when running `huggingface-cli login` (necessary to use this script """
"""with private models)."""
)
} , )
def _snake_case ( self : Tuple ) -> List[Any]:
'''simple docstring'''
if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None):
raise ValueError(
'''--config_overrides can\'t be used in combination with --config_name or --model_name_or_path''' )
@dataclass
class lowerCAmelCase_ :
'''simple docstring'''
UpperCamelCase_ : Optional[str] = field(
default=UpperCAmelCase_ , metadata={"""help""": """The name of the dataset to use (via the datasets library)."""} )
UpperCamelCase_ : Optional[str] = field(
default=UpperCAmelCase_ , metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""} )
UpperCamelCase_ : Optional[str] = field(default=UpperCAmelCase_ , metadata={"""help""": """The input training data file (a text file)."""} )
UpperCamelCase_ : Optional[str] = field(
default=UpperCAmelCase_ , metadata={"""help""": """An optional input evaluation data file to evaluate the perplexity on (a text file)."""} , )
UpperCamelCase_ : Optional[str] = field(
default=UpperCAmelCase_ , metadata={"""help""": """An optional input train ref data file for whole word masking in Chinese."""} , )
UpperCamelCase_ : Optional[str] = field(
default=UpperCAmelCase_ , metadata={"""help""": """An optional input validation ref data file for whole word masking in Chinese."""} , )
UpperCamelCase_ : bool = field(
default=UpperCAmelCase_ , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
UpperCamelCase_ : Optional[int] = field(
default=5 , metadata={
"""help""": """The percentage of the train set used as validation set in case there's no validation split"""
} , )
UpperCamelCase_ : Optional[int] = field(
default=UpperCAmelCase_ , metadata={
"""help""": (
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated. Default to the max input length of the model."""
)
} , )
UpperCamelCase_ : Optional[int] = field(
default=UpperCAmelCase_ , metadata={"""help""": """The number of processes to use for the preprocessing."""} , )
UpperCamelCase_ : float = field(
default=0.15 , metadata={"""help""": """Ratio of tokens to mask for masked language modeling loss"""} )
UpperCamelCase_ : bool = field(
default=UpperCAmelCase_ , metadata={
"""help""": (
"""Whether to pad all samples to `max_seq_length`. """
"""If False, will pad the samples dynamically when batching to the maximum length in the batch."""
)
} , )
def _snake_case ( self : List[Any] ) -> Optional[int]:
'''simple docstring'''
if self.train_file is not None:
A: Tuple = self.train_file.split('''.''' )[-1]
assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file."
if self.validation_file is not None:
A: str = self.validation_file.split('''.''' )[-1]
assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file."
def SCREAMING_SNAKE_CASE( __lowercase , __lowercase ) -> List[str]:
with open(__lowercase , '''r''' , encoding='''utf-8''' ) as f:
A: List[Any] = [json.loads(__lowercase ) for line in f.read().splitlines() if (len(__lowercase ) > 0 and not line.isspace())]
assert len(__lowercase ) == len(__lowercase )
A: Optional[int] = {c: dataset[c] for c in dataset.column_names}
A: Union[str, Any] = refs
return Dataset.from_dict(__lowercase )
def SCREAMING_SNAKE_CASE( ) -> int:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
A: int = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
A , A , A: Optional[int] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
A , A , A: List[Any] = parser.parse_args_into_dataclasses()
# Detecting last checkpoint.
A: Any = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
A: Any = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None:
logger.info(
F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN )
# Log on each process the small summary:
logger.warning(
F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('''Training/evaluation parameters %s''' , __lowercase )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
A: Dict = load_dataset(data_args.dataset_name , data_args.dataset_config_name )
if "validation" not in datasets.keys():
A: int = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=F"""train[:{data_args.validation_split_percentage}%]""" , )
A: Dict = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=F"""train[{data_args.validation_split_percentage}%:]""" , )
else:
A: Any = {}
if data_args.train_file is not None:
A: int = data_args.train_file
if data_args.validation_file is not None:
A: Optional[int] = data_args.validation_file
A: List[str] = data_args.train_file.split('''.''' )[-1]
if extension == "txt":
A: int = '''text'''
A: Any = load_dataset(__lowercase , data_files=__lowercase )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
A: Dict = {
'''cache_dir''': model_args.cache_dir,
'''revision''': model_args.model_revision,
'''use_auth_token''': True if model_args.use_auth_token else None,
}
if model_args.config_name:
A: List[Any] = AutoConfig.from_pretrained(model_args.config_name , **__lowercase )
elif model_args.model_name_or_path:
A: int = AutoConfig.from_pretrained(model_args.model_name_or_path , **__lowercase )
else:
A: str = CONFIG_MAPPING[model_args.model_type]()
logger.warning('''You are instantiating a new config instance from scratch.''' )
if model_args.config_overrides is not None:
logger.info(F"""Overriding config: {model_args.config_overrides}""" )
config.update_from_string(model_args.config_overrides )
logger.info(F"""New config: {config}""" )
A: Tuple = {
'''cache_dir''': model_args.cache_dir,
'''use_fast''': model_args.use_fast_tokenizer,
'''revision''': model_args.model_revision,
'''use_auth_token''': True if model_args.use_auth_token else None,
}
if model_args.tokenizer_name:
A: Optional[int] = AutoTokenizer.from_pretrained(model_args.tokenizer_name , **__lowercase )
elif model_args.model_name_or_path:
A: Union[str, Any] = AutoTokenizer.from_pretrained(model_args.model_name_or_path , **__lowercase )
else:
raise ValueError(
'''You are instantiating a new tokenizer from scratch. This is not supported by this script.'''
'''You can do it from another script, save it, and load it from here, using --tokenizer_name.''' )
if model_args.model_name_or_path:
A: List[Any] = AutoModelForMaskedLM.from_pretrained(
model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=__lowercase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
else:
logger.info('''Training new model from scratch''' )
A: List[Any] = AutoModelForMaskedLM.from_config(__lowercase )
model.resize_token_embeddings(len(__lowercase ) )
# Preprocessing the datasets.
# First we tokenize all the texts.
if training_args.do_train:
A: int = datasets['''train'''].column_names
else:
A: str = datasets['''validation'''].column_names
A: Tuple = '''text''' if '''text''' in column_names else column_names[0]
A: List[str] = '''max_length''' if data_args.pad_to_max_length else False
def tokenize_function(__lowercase ):
# Remove empty lines
A: int = [line for line in examples['''text'''] if len(__lowercase ) > 0 and not line.isspace()]
return tokenizer(examples['''text'''] , padding=__lowercase , truncation=__lowercase , max_length=data_args.max_seq_length )
A: str = datasets.map(
__lowercase , batched=__lowercase , num_proc=data_args.preprocessing_num_workers , remove_columns=[text_column_name] , load_from_cache_file=not data_args.overwrite_cache , )
# Add the chinese references if provided
if data_args.train_ref_file is not None:
A: List[str] = add_chinese_references(tokenized_datasets['''train'''] , data_args.train_ref_file )
if data_args.validation_ref_file is not None:
A: Dict = add_chinese_references(
tokenized_datasets['''validation'''] , data_args.validation_ref_file )
# If we have ref files, need to avoid it removed by trainer
A: Optional[Any] = data_args.train_ref_file or data_args.validation_ref_file
if has_ref:
A: List[Any] = False
# Data collator
# This one will take care of randomly masking the tokens.
A: Optional[Any] = DataCollatorForWholeWordMask(tokenizer=__lowercase , mlm_probability=data_args.mlm_probability )
# Initialize our Trainer
A: Optional[int] = Trainer(
model=__lowercase , args=__lowercase , train_dataset=tokenized_datasets['''train'''] if training_args.do_train else None , eval_dataset=tokenized_datasets['''validation'''] if training_args.do_eval else None , tokenizer=__lowercase , data_collator=__lowercase , )
# Training
if training_args.do_train:
if last_checkpoint is not None:
A: Optional[int] = last_checkpoint
elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path ):
A: str = model_args.model_name_or_path
else:
A: List[str] = None
A: str = trainer.train(resume_from_checkpoint=__lowercase )
trainer.save_model() # Saves the tokenizer too for easy upload
A: Union[str, Any] = os.path.join(training_args.output_dir , '''train_results.txt''' )
if trainer.is_world_process_zero():
with open(__lowercase , '''w''' ) as writer:
logger.info('''***** Train results *****''' )
for key, value in sorted(train_result.metrics.items() ):
logger.info(F""" {key} = {value}""" )
writer.write(F"""{key} = {value}\n""" )
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir , '''trainer_state.json''' ) )
# Evaluation
A: Optional[int] = {}
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
A: Optional[Any] = trainer.evaluate()
A: Union[str, Any] = math.exp(eval_output['''eval_loss'''] )
A: Dict = perplexity
A: Any = os.path.join(training_args.output_dir , '''eval_results_mlm_wwm.txt''' )
if trainer.is_world_process_zero():
with open(__lowercase , '''w''' ) as writer:
logger.info('''***** Eval results *****''' )
for key, value in sorted(results.items() ):
logger.info(F""" {key} = {value}""" )
writer.write(F"""{key} = {value}\n""" )
return results
def SCREAMING_SNAKE_CASE( __lowercase ) -> List[Any]:
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 319 | 0 |
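The script delegates masking to `DataCollatorForWholeWordMask`. A small sketch of collating a toy batch, assuming `bert-base-uncased` purely as an example checkpoint; which positions get masked is random, so the printed count varies run to run:

```python
from transformers import AutoTokenizer, DataCollatorForWholeWordMask

tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
collator = DataCollatorForWholeWordMask(tokenizer=tokenizer, mlm_probability=0.15)

# One toy example; the collator masks whole words (all of a word's subwords).
batch = [tokenizer("Whole word masking masks all subwords of a word together.")]
out = collator(batch)
print(out["input_ids"].shape)
print((out["labels"] != -100).sum().item(), "token positions selected for masking")
```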
"""simple docstring"""
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets,
# U and V such that every edge (u, v) either connects a vertex from U to V or a vertex
# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V,
# or u belongs to V and v to U. We can also say that there is no edge that connects
# vertices of same set.
def check_bipartite_dfs(graph):
    visited = [False] * len(graph)
    color = [-1] * len(graph)
    def dfs(v, c):
        visited[v] = True
        color[v] = c
        for u in graph[v]:
            if not visited[u]:
                dfs(u, 1 - c)
    for i in range(len(graph)):
        if not visited[i]:
            dfs(i, 0)
    for i in range(len(graph)):
        for j in graph[i]:
            if color[i] == color[j]:
                return False
    return True
# Adjacency list of graph
graph = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))
| 106 |
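The DFS two-coloring above has an equivalent iterative BFS formulation over the same adjacency-list format, which avoids recursion-depth limits on long paths:

```python
from collections import deque

def check_bipartite_bfs(graph):
    color = [-1] * len(graph)  # -1 means uncolored
    for start in range(len(graph)):
        if color[start] != -1:
            continue
        color[start] = 0
        queue = deque([start])
        while queue:
            v = queue.popleft()
            for u in graph[v]:
                if color[u] == -1:
                    color[u] = 1 - color[v]  # alternate colors across each edge
                    queue.append(u)
                elif color[u] == color[v]:
                    return False  # same-colored neighbors -> odd cycle
    return True

print(check_bipartite_bfs({0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}))  # True
```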
'''simple docstring'''
import json
import os
import unittest
from typing import Tuple
from transformers import Wav2Vec2PhonemeCTCTokenizer
from transformers.models.wav2vec2.tokenization_wav2vec2 import VOCAB_FILES_NAMES
from transformers.models.wav2vec2_phoneme.tokenization_wav2vec2_phoneme import Wav2Vec2PhonemeCTCTokenizerOutput
from transformers.testing_utils import require_phonemizer
from ...test_tokenization_common import TokenizerTesterMixin
@require_phonemizer
class lowerCAmelCase_ ( UpperCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
    UpperCamelCase_ : Any = Wav2Vec2PhonemeCTCTokenizer
UpperCamelCase_ : Tuple = False
def _snake_case ( self : str ) -> Union[str, Any]:
'''simple docstring'''
super().setUp()
A: Optional[int] = (
'''<s> <pad> </s> <unk> n s t ə l a i k d m ɛ ɾ e ɪ p o ɐ z ð f j v b ɹ ʁ ʊ iː r w ʌ u ɡ æ aɪ ʃ h ɔ ɑː '''
'''ŋ ɚ eɪ β uː y ɑ̃ oʊ ᵻ eː θ aʊ ts oː ɔ̃ ɣ ɜ ɑ dʒ əl x ɜː ç ʒ tʃ ɔː ɑːɹ ɛ̃ ʎ ɔːɹ ʋ aː ɕ œ ø oːɹ ɲ yː '''
'''ʔ iə i5 s. tɕ ?? nʲ ɛː œ̃ ɭ ɔø ʑ tʲ ɨ ɛɹ ts. rʲ ɪɹ ɭʲ i.5 ɔɪ q sʲ u5 ʊɹ iɜ a5 iɛ5 øː ʕ ja əɜ th ɑ5 '''
'''oɪ dʲ ə5 tɕh ts.h mʲ ɯ dʑ vʲ e̞ tʃʲ ei5 o5 onɡ5 ɑu5 iɑ5 ai5 aɪɚ kh ə1 ʐ i2 ʉ ħ t[ aɪə ʲ ju ə2 u2 oɜ '''
'''pː iɛɜ ou5 y5 uɜ tː uo5 d[ uoɜ tsh ɑɜ ɵ i̪5 uei5 ɟ aɜ ɑɨ i.ɜ eʊ o2 ɐ̃ ä pʲ kʲ n̩ ɒ ph ɑu2 uɨ əɪ ɫ ɬ '''
'''yɜ bʲ ɑ2 s̪ aiɜ χ ɐ̃ʊ̃ 1 ə4 yæɜ a2 ɨː t̪ iouɜ ũ onɡɜ aɨ iɛ2 ɔɨ ɑuɜ o̞ ei2 iou2 c kː y2 ɖ oe dˤ yɛɜ '''
'''əʊ S ɡʲ onɡ2 u" eiɜ ʈ ɯᵝ iou5 dZ r̝̊ i.2 tS s^ ʝ yə5 iɑɜ uə5 pf ɨu iɑ2 ou2 ər2 fʲ ai2 r̝ uəɜ ɳ əɨ '''
'''ua5 uɪ ɽ bː yu5 uo2 yɛ5 l̩ ɻ ərɜ ʂ i̪2 ouɜ uaɜ a. a.ː yæ5 dː r̩ ee ɪu ər5 i̪ ɜ æi u: i.ː t^ o1 ɪ^ '''
'''ai ueiɜ æː ɛɪ eə i. ɴ ie ua2 ɑ1 o4 tʃː o: ɑ: u1 N i̪1 au yæ2 u. qː yəɜ y: kʰ tʃʰ iʊ sx õ uo tʰ '''
'''uai5 bʰ u.ː uə2 ʊə d^ s̪ː yiɜ dʰ r. oe: i1 ɟː yu2 nʲʲ i̪4 uei2 tsʲ ɸ ĩ ɑ4 t̪ː eɑ u4 e: tsː ʈʰ ɡʰ '''
'''ɯɯ dʒʲ ʂʲ X ɵː uaiɜ tɕʲ ã t^ː ẽː yɛ2 cː i.1 ɛʊ dˤdˤ dʒː i4 ɡː yi ɕʲ ɟʰ pʰ dʑʲ yuɜ ua1 ua4 æiː ɐɐ '''
'''ui iou1 ʊː a1 iou4 cʰ iɛ1 yə2 ɖʰ ẽ ʒʲ ää ər4 iːː ɪː iɑ1 ər1 œː øi ɪuː cʰcʰ əː1 iː1 ũ kʰː o̞o̞ xʲ '''
'''ou1 iɛ4 e̞e̞ y1 dzː dʲʲ dʰː ɯᵝɯᵝ lː uo1 i.4 i: yɛ5ʲ a4'''
).split(''' ''' )
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        self.special_tokens_map = {'''pad_token''': '''<pad>''', '''unk_token''': '''<unk>''', '''bos_token''': '''<s>''', '''eos_token''': '''</s>'''}
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write(json.dumps(vocab_tokens ) + '''\n''' )
def _snake_case ( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Tuple=False , SCREAMING_SNAKE_CASE_ : Any=20 , SCREAMING_SNAKE_CASE_ : Optional[int]=5 ) -> Tuple[str, list]:
'''simple docstring'''
A: int = [(i, tokenizer.decode([i] , clean_up_tokenization_spaces=SCREAMING_SNAKE_CASE_ )) for i in range(len(SCREAMING_SNAKE_CASE_ ) )]
A: Optional[Any] = list(filter(lambda SCREAMING_SNAKE_CASE_ : [t[0]] == tokenizer.encode(t[1] , do_phonemize=SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ ) )
if max_length is not None and len(SCREAMING_SNAKE_CASE_ ) > max_length:
A: int = toks[:max_length]
if min_length is not None and len(SCREAMING_SNAKE_CASE_ ) < min_length and len(SCREAMING_SNAKE_CASE_ ) > 0:
while len(SCREAMING_SNAKE_CASE_ ) < min_length:
A: Dict = toks + toks
# toks_str = [t[1] for t in toks]
A: Union[str, Any] = [t[0] for t in toks]
# Ensure consistency
A: List[str] = tokenizer.decode(SCREAMING_SNAKE_CASE_ , clean_up_tokenization_spaces=SCREAMING_SNAKE_CASE_ )
if " " not in output_txt and len(SCREAMING_SNAKE_CASE_ ) > 1:
A: int = (
tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=SCREAMING_SNAKE_CASE_ )
+ ''' '''
+ tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=SCREAMING_SNAKE_CASE_ )
)
if with_prefix_space:
A: Tuple = ''' ''' + output_txt
A: List[str] = tokenizer.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ )
return output_txt, output_ids
    def get_tokenizer(self , **kwargs ):
        kwargs.update(self.special_tokens_map )
        return Wav2Vec2PhonemeCTCTokenizer.from_pretrained(self.tmpdirname , **kwargs )
def _snake_case ( self : int ) -> Optional[Any]:
'''simple docstring'''
A: List[Any] = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' )
# check adding a single token
tokenizer.add_tokens('''xxx''' )
A: Any = tokenizer('''m xxx ɪ''' , do_phonemize=SCREAMING_SNAKE_CASE_ ).input_ids
self.assertEqual(SCREAMING_SNAKE_CASE_ , [13, 3_92, 17] ) # xxx should be last token
tokenizer.add_tokens(['''aaa''', '''bbb''', '''ccc'''] )
A: Optional[int] = tokenizer('''m aaa ɪ ccc''' , do_phonemize=SCREAMING_SNAKE_CASE_ ).input_ids
self.assertEqual(SCREAMING_SNAKE_CASE_ , [13, 3_93, 17, 3_95] ) # aaa and ccc should be after xxx and 2 after aaa
A: str = tokenizer('''maɪ c''' , do_phonemize=SCREAMING_SNAKE_CASE_ ).input_ids
self.assertEqual(SCREAMING_SNAKE_CASE_ , [3, 2_00] ) # mai should be <unk> (=3)
def _snake_case ( self : int ) -> List[Any]:
'''simple docstring'''
A: Any = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' )
A: Any = '''Hello how are you'''
A: Optional[Any] = tokenizer.phonemize(SCREAMING_SNAKE_CASE_ , phonemizer_lang='''en-us''' )
self.assertEqual(SCREAMING_SNAKE_CASE_ , '''h ə l oʊ h aʊ ɑːɹ j uː''' )
def _snake_case ( self : Tuple ) -> Dict:
'''simple docstring'''
A: str = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' )
A: List[Any] = '''Hello how are you'''
A: Any = tokenizer.phonemize(SCREAMING_SNAKE_CASE_ , phonemizer_lang='''en-us''' )
self.assertEqual(tokenizer(SCREAMING_SNAKE_CASE_ ).input_ids , tokenizer(SCREAMING_SNAKE_CASE_ , do_phonemize=SCREAMING_SNAKE_CASE_ ).input_ids )
def _snake_case ( self : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
A: str = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' )
A: List[str] = '''Hello how are you'''
A: Union[str, Any] = tokenizer.phonemize(SCREAMING_SNAKE_CASE_ , phonemizer_lang='''en-us''' )
A: Union[str, Any] = tokenizer.decode(tokenizer(SCREAMING_SNAKE_CASE_ ).input_ids )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def _snake_case ( self : Dict ) -> Optional[Any]:
'''simple docstring'''
A: Dict = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' )
A: Optional[Any] = [
[11, 5, 15, tokenizer.pad_token_id, 15, 8, 98],
[24, 22, 5, 24, 22, 5, 77],
]
A: List[str] = tokenizer.decode(sample_ids[0] )
A: List[str] = tokenizer.batch_decode(SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , batch_tokens[0] )
self.assertEqual(SCREAMING_SNAKE_CASE_ , ['''k s ɾ ɾ l ɭʲ''', '''j ð s j ð s oːɹ'''] )
def _snake_case ( self : Any ) -> Optional[int]:
'''simple docstring'''
A: int = self.tokenizer_class.from_pretrained(
'''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token='''|''' )
tokenizer.add_tokens('''|''' )
A: List[Any] = '''Hello how are you'''
A: Optional[Any] = tokenizer.phonemize(SCREAMING_SNAKE_CASE_ , phonemizer_lang='''en-us''' )
self.assertEqual(SCREAMING_SNAKE_CASE_ , '''h ə l oʊ | h aʊ | ɑːɹ | j uː |''' )
def _snake_case ( self : List[str] ) -> int:
'''simple docstring'''
A: Optional[Any] = self.tokenizer_class.from_pretrained(
'''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token='''|''' )
tokenizer.add_tokens('''|''' )
A: Optional[Any] = '''Hello how are you'''
A: Any = tokenizer.phonemize(SCREAMING_SNAKE_CASE_ , phonemizer_lang='''en-us''' )
self.assertEqual(tokenizer(SCREAMING_SNAKE_CASE_ ).input_ids , tokenizer(SCREAMING_SNAKE_CASE_ , do_phonemize=SCREAMING_SNAKE_CASE_ ).input_ids )
def _snake_case ( self : Dict ) -> Any:
'''simple docstring'''
A: Optional[int] = self.tokenizer_class.from_pretrained(
'''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token='''|''' )
tokenizer.add_tokens('''|''' )
# fmt: off
A: str = [
[11, 5, 15, tokenizer.pad_token_id, tokenizer.word_delimiter_token_id, 15, 8, tokenizer.word_delimiter_token_id, 98],
[tokenizer.word_delimiter_token_id, 24, 22, tokenizer.word_delimiter_token_id, 5, 24, 22, 5, 77],
]
# fmt: on
# decode with word_del_token filter
A: Tuple = tokenizer.decode(sample_ids[0] )
A: Optional[Any] = tokenizer.batch_decode(SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , batch_tokens[0] )
self.assertEqual(SCREAMING_SNAKE_CASE_ , ['''k s ɾ ɾ l ɭʲ''', '''j ð s j ð s oːɹ'''] )
# decode with no word_del_token filter
A: str = tokenizer.decode(sample_ids[0] , filter_word_delimiter_token=SCREAMING_SNAKE_CASE_ )
A: List[Any] = tokenizer.batch_decode(SCREAMING_SNAKE_CASE_ , filter_word_delimiter_token=SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , batch_tokens[0] )
self.assertEqual(SCREAMING_SNAKE_CASE_ , ['''k s ɾ | ɾ l | ɭʲ''', '''| j ð | s j ð s oːɹ'''] )
def _snake_case ( self : int ) -> List[str]:
'''simple docstring'''
A: Dict = self.tokenizer_class.from_pretrained(
'''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token='''|''' )
tokenizer.add_tokens('''|''' )
A: Union[str, Any] = '''Hello how are you'''
A: Tuple = tokenizer.phonemize(SCREAMING_SNAKE_CASE_ , phonemizer_lang='''en-us''' )
A: Any = tokenizer.decode(tokenizer(SCREAMING_SNAKE_CASE_ ).input_ids , filter_word_delimiter_token=SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def _snake_case ( self : List[str] ) -> Any:
'''simple docstring'''
A: Dict = self.tokenizer_class.from_pretrained(
'''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token='''|''' )
tokenizer.add_tokens('''|''' )
A: Any = '''Hello how are you'''
A: List[Any] = tokenizer.phonemize(SCREAMING_SNAKE_CASE_ , phonemizer_lang='''en-us''' )
A: List[Any] = tokenizer.decode(tokenizer(SCREAMING_SNAKE_CASE_ ).input_ids , filter_word_delimiter_token=SCREAMING_SNAKE_CASE_ )
self.assertEqual(''' '''.join([p.strip() for p in phonemes.split(''' |''' )] ).strip() , SCREAMING_SNAKE_CASE_ )
def _snake_case ( self : List[str] ) -> Optional[Any]:
'''simple docstring'''
A: List[str] = self.tokenizer_class.from_pretrained(
'''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token=SCREAMING_SNAKE_CASE_ )
A: List[Any] = '''Hello how are you'''
A: List[str] = tokenizer(SCREAMING_SNAKE_CASE_ , phonemizer_lang='''en-us''' ).input_ids
A: Tuple = tokenizer(SCREAMING_SNAKE_CASE_ , phonemizer_lang='''fr-fr''' ).input_ids
self.assertNotEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
A: Tuple = tokenizer.decode(SCREAMING_SNAKE_CASE_ )
A: Any = tokenizer.decode(SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , '''h ə l oʊ h aʊ ɑːɹ j uː''' )
self.assertEqual(SCREAMING_SNAKE_CASE_ , '''ɛ l o h aʊ a ʁ j u''' )
def _snake_case ( self : str ) -> str:
'''simple docstring'''
A: str = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' )
A: str = '''Hello how Are you'''
A: Union[str, Any] = '''hello how are you'''
A: List[str] = tokenizer(SCREAMING_SNAKE_CASE_ ).input_ids
A: str = tokenizer(SCREAMING_SNAKE_CASE_ ).input_ids
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def _snake_case ( self : int ) -> List[Any]:
'''simple docstring'''
A: Union[str, Any] = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' )
tokenizer.add_tokens(['''!''', '''?'''] )
tokenizer.add_special_tokens({'''cls_token''': '''$$$'''} )
# fmt: off
A: Tuple = [
[11, 5, 15, tokenizer.pad_token_id, 15, 8, 98, 3_92, 3_92, 3_93, 3_92, 3_92, 3_93, 3_94, 3_94],
[24, 22, 5, 24, 22, 5, 77, tokenizer.pad_token_id, 3_94, 3_94],
]
# fmt: on
A: List[Any] = tokenizer.batch_decode(SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , ['''k s ɾ ɾ l ɭʲ!?!? $$$''', '''j ð s j ð s oːɹ $$$'''] )
@staticmethod
def _snake_case ( SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Optional[int] ) -> Tuple:
'''simple docstring'''
A: Any = [d[key] for d in offsets]
return retrieved_list
def _snake_case ( self : Any ) -> Tuple:
'''simple docstring'''
A: str = self.get_tokenizer(word_delimiter_token='''|''' )
tokenizer.add_tokens('''|''' )
# fmt: off
# ksssɾɾ|ɾɾ<pad>ɾɾ|<pad>ɾlll|ɭʲ -> k s ɾ ɾ | ɾ l | ɭʲ"
A: Union[str, Any] = [11, 5, 5, 5, 15, 15, tokenizer.pad_token_id, 15, 15, tokenizer.word_delimiter_token_id, tokenizer.pad_token_id, 15, 8, 8, 8, tokenizer.word_delimiter_token_id, 98]
# fmt: on
A: int = tokenizer.decode(SCREAMING_SNAKE_CASE_ , output_char_offsets=SCREAMING_SNAKE_CASE_ , filter_word_delimiter_token=SCREAMING_SNAKE_CASE_ )
# check Wav2Vec2CTCTokenizerOutput keys for char
self.assertEqual(len(outputs.keys() ) , 2 )
self.assertTrue('''text''' in outputs )
self.assertTrue('''char_offsets''' in outputs )
self.assertTrue(isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
# check that order of chars is correct and identical for both outputs
self.assertEqual(''' '''.join(self.get_from_offsets(outputs['''char_offsets'''] , '''char''' ) ) , outputs.text )
self.assertListEqual(
self.get_from_offsets(outputs['''char_offsets'''] , '''char''' ) , ['''k''', '''s''', '''ɾ''', '''ɾ''', '''|''', '''ɾ''', '''l''', '''|''', '''ɭʲ'''] )
# check that offsets are actually correct for char
# 0-1 is 11, 1-4 is 5, 4-6 is first 15, 6-7 is <pad> (thus not shown), 7-9 is second 15, 9-10 is word_delimiter_token,
# 10-11 is <pad> (thus not shown), 11-12 is third 15, 12-15 is 8, 15-16 is word_delimiter_token, 16-17 is 98
self.assertListEqual(
self.get_from_offsets(outputs['''char_offsets'''] , '''start_offset''' ) , [0, 1, 4, 7, 9, 11, 12, 15, 16] )
self.assertListEqual(
self.get_from_offsets(outputs['''char_offsets'''] , '''end_offset''' ) , [1, 4, 6, 9, 10, 12, 15, 16, 17] )
def _snake_case ( self : Any ) -> List[Any]:
'''simple docstring'''
A: Optional[int] = self.get_tokenizer(word_delimiter_token='''|''' )
def check_list_tuples_equal(SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Optional[Any] ):
self.assertTrue(isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
self.assertTrue(isinstance(outputs_list[0] , SCREAMING_SNAKE_CASE_ ) )
# transform list to ModelOutput
            outputs_batch_a = Wav2Vec2PhonemeCTCTokenizerOutput(
                {k: [d[k] for d in outputs_list] for k in outputs_list[0]} )
self.assertListEqual(outputs_batch['''text'''] , outputs_batch_a['''text'''] )
        def recursive_check(list_or_obj , list_or_obj_2 ):
            if isinstance(list_or_obj , list ):
                [recursive_check(l1 , l2 ) for l1, l2 in zip(list_or_obj , list_or_obj_2 )]
            self.assertEqual(list_or_obj , list_or_obj_2 )
if "char_offsets" in outputs_batch:
recursive_check(outputs_batch['''char_offsets'''] , outputs_batch_a['''char_offsets'''] )
# fmt: off
A: int = [
[11, 5, 15, tokenizer.pad_token_id, 15, 4, 8, 98, 32, 32, 32, 32, 4, 33, tokenizer.word_delimiter_token_id, 32, 32, 33, 34, 34],
[24, 22, 5, tokenizer.word_delimiter_token_id, tokenizer.word_delimiter_token_id, 24, 22, 22, 22, 4, 5, 77, tokenizer.pad_token_id, 22, 22, 4, 34, 34, 34, 34],
]
# fmt: on
# We assume that `decode` works as expected. All we will check now is
# the output type is correct and the output is identical to `decode`
# char
A: List[Any] = tokenizer.batch_decode(SCREAMING_SNAKE_CASE_ , output_char_offsets=SCREAMING_SNAKE_CASE_ )
A: List[Any] = [tokenizer.decode(SCREAMING_SNAKE_CASE_ , output_char_offsets=SCREAMING_SNAKE_CASE_ ) for ids in sample_ids]
check_list_tuples_equal(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
@unittest.skip('''Wav2Vec2PhonemeTokenizer always lower cases letters to correctly map to phonemes''' )
def _snake_case ( self : int ) -> int:
'''simple docstring'''
pass
@unittest.skip('''Wav2Vec2PhonemeTokenizer always puts spaces between phonemes''' )
def _snake_case ( self : str ) -> Any:
'''simple docstring'''
pass
@unittest.skip('''encodes to text to ids, but decodes ids to phonemes -> not possible to have internal consistency''' )
def _snake_case ( self : List[str] ) -> List[str]:
'''simple docstring'''
pass
@unittest.skip('''Wav2Vec2PhonemeModel has no max model length => no testing''' )
def _snake_case ( self : Dict ) -> List[Any]:
'''simple docstring'''
pass
def _snake_case ( self : Tuple ) -> Any:
'''simple docstring'''
A: Any = self.get_tokenizers(do_lower_case=SCREAMING_SNAKE_CASE_ )
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
A: str = tokenizer.vocab_size
A: str = len(SCREAMING_SNAKE_CASE_ )
self.assertNotEqual(SCREAMING_SNAKE_CASE_ , 0 )
# We usually have added tokens from the start in tests because our vocab fixtures are
# smaller than the original vocabs - let's not assert this
# self.assertEqual(vocab_size, all_size)
A: List[Any] = ['''aaaaa bbbbbb''', '''cccccccccdddddddd''']
A: List[Any] = tokenizer.add_tokens(SCREAMING_SNAKE_CASE_ )
A: Optional[Any] = tokenizer.vocab_size
A: Union[str, Any] = len(SCREAMING_SNAKE_CASE_ )
self.assertNotEqual(SCREAMING_SNAKE_CASE_ , 0 )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , len(SCREAMING_SNAKE_CASE_ ) )
self.assertEqual(SCREAMING_SNAKE_CASE_ , all_size + len(SCREAMING_SNAKE_CASE_ ) )
A: Any = tokenizer.encode('''aaaaa bbbbbb low cccccccccdddddddd l''' , add_special_tokens=SCREAMING_SNAKE_CASE_ )
self.assertGreaterEqual(len(SCREAMING_SNAKE_CASE_ ) , 4 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
A: str = {'''eos_token''': '''>>>>|||<||<<|<<''', '''pad_token''': '''<<<<<|||>|>>>>|>'''}
A: int = tokenizer.add_special_tokens(SCREAMING_SNAKE_CASE_ )
A: Optional[Any] = tokenizer.vocab_size
A: Optional[Any] = len(SCREAMING_SNAKE_CASE_ )
self.assertNotEqual(SCREAMING_SNAKE_CASE_ , 0 )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , len(SCREAMING_SNAKE_CASE_ ) )
self.assertEqual(SCREAMING_SNAKE_CASE_ , all_size_a + len(SCREAMING_SNAKE_CASE_ ) )
A: int = tokenizer.encode(
'''>>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l''' , add_special_tokens=SCREAMING_SNAKE_CASE_ )
self.assertGreaterEqual(len(SCREAMING_SNAKE_CASE_ ) , 6 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[0] , tokens[1] )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokens[-4] )
self.assertEqual(tokens[0] , tokenizer.eos_token_id )
self.assertEqual(tokens[-3] , tokenizer.pad_token_id )
@unittest.skip('''The tokenizer shouldn\'t be used to encode input IDs (except for labels), only to decode.''' )
def _snake_case ( self : List[Any] ) -> Optional[Any]:
'''simple docstring'''
pass
@unittest.skip('''The tokenizer shouldn\'t be used to encode input IDs (except for labels), only to decode.''' )
def _snake_case ( self : Tuple ) -> Optional[Any]:
'''simple docstring'''
pass
def _snake_case ( self : str ) -> Tuple:
'''simple docstring'''
A: List[Any] = self.get_tokenizers(fast=SCREAMING_SNAKE_CASE_ , do_lower_case=SCREAMING_SNAKE_CASE_ )
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
A: Union[str, Any] = ['''ð''', '''ɪ''', '''s''', '''ɪ''', '''z''', '''ɐ''', '''t''', '''ɛ''', '''k''', '''s''', '''t''']
A: Union[str, Any] = tokenizer.convert_tokens_to_string(SCREAMING_SNAKE_CASE_ )
self.assertIsInstance(output['''text'''] , SCREAMING_SNAKE_CASE_ )
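
# --- Illustrative sketch (added for clarity, not part of the original tests) ---
# The decoding behaviour exercised above follows the usual CTC convention:
# collapse runs of repeated ids, then drop the pad (blank) id. A pad token
# between two equal ids keeps both occurrences, which is why repeated phonemes
# separated by <pad> survive in the offset test above. `pad_id=0` here is a
# made-up value for the stand-alone example.
def ctc_collapse(ids, pad_id):
    out, prev = [], None
    for i in ids:
        if i != prev and i != pad_id:
            out.append(i)
        prev = i
    return out


assert ctc_collapse([11, 5, 5, 5, 15, 15, 0, 15], pad_id=0) == [11, 5, 15, 15]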
| 319 | 0 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'sentencepiece.model'}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'google/rembert': 'https://huggingface.co/google/rembert/resolve/main/sentencepiece.model',
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'google/rembert': 256,
}
class RemBertTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=True,
        keep_accents=True,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        **kwargs,
    ):
        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        return len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text, sample=False):
        pieces = self.sp_model.EncodeAsPieces(text)
        return pieces

    def _convert_token_to_id(self, token):
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        out_string = self.sp_model.decode_pieces(tokens)
        return out_string

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
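
# --- Illustrative sketch (added for clarity, not part of the original file) ---
# `build_inputs_with_special_tokens` above implements the classic single-sequence
# and sequence-pair layouts; the same rule in plain Python, with the default
# RemBERT special tokens:
def _pair_layout(seq_a, seq_b=None, cls="[CLS]", sep="[SEP]"):
    if seq_b is None:
        return [cls] + seq_a + [sep]
    return [cls] + seq_a + [sep] + seq_b + [sep]


assert _pair_layout(["a"], ["b"]) == ["[CLS]", "a", "[SEP]", "b", "[SEP]"]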
| 107 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
logger = logging.get_logger(__name__)


class BeitFeatureExtractor(BeitImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use BeitImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 319 | 0 |
"""simple docstring"""
from pathlib import Path
import cv2
import numpy as np
from matplotlib import pyplot as plt


def get_rotation(img: np.ndarray, pt1: np.ndarray, pt2: np.ndarray, rows: int, cols: int) -> np.ndarray:
    matrix = cv2.getAffineTransform(pt1, pt2)
    return cv2.warpAffine(img, matrix, (rows, cols))


if __name__ == "__main__":
    # read original image
    image = cv2.imread(
        str(Path(__file__).resolve().parent.parent / "image_data" / "lena.jpg")
    )
    # turn image in gray scale value
    gray_img = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # get image shape
    img_rows, img_cols = gray_img.shape
    # set different points to rotate image
    pts1 = np.array([[50, 50], [200, 50], [50, 200]], np.float32)
    pts2 = np.array([[10, 100], [200, 50], [100, 250]], np.float32)
    pts3 = np.array([[50, 50], [150, 50], [120, 200]], np.float32)
    pts4 = np.array([[10, 100], [80, 50], [180, 250]], np.float32)
    # add all rotated images in a list
    images = [
        gray_img,
        get_rotation(gray_img, pts1, pts2, img_rows, img_cols),
        get_rotation(gray_img, pts2, pts3, img_rows, img_cols),
        get_rotation(gray_img, pts2, pts4, img_rows, img_cols),
    ]
    # plot different image rotations
    fig = plt.figure(1)
    titles = ["Original", "Rotation 1", "Rotation 2", "Rotation 3"]
    for i, image in enumerate(images):
        plt.subplot(2, 2, i + 1), plt.imshow(image, "gray")
        plt.title(titles[i])
        plt.axis("off")
    plt.subplots_adjust(left=0.0, bottom=0.05, right=1.0, top=0.95)
    plt.show()
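
# --- Illustrative sketch (added for clarity, not part of the original script) ---
# cv2.getAffineTransform solves for the 2x3 matrix M that maps each of the three
# source points exactly onto its destination point: dst = M @ [x, y, 1]^T.
# The point sets below are made-up values for a pure translation by (1, 1).
def _check_affine(src: np.ndarray, dst: np.ndarray) -> bool:
    matrix = cv2.getAffineTransform(src, dst)  # shape (2, 3)
    homogeneous = np.hstack([src, np.ones((3, 1), np.float32)])  # append 1 to each point
    return np.allclose(homogeneous @ matrix.T, dst, atol=1e-4)


_src = np.array([[0, 0], [1, 0], [0, 1]], np.float32)
_dst = np.array([[1, 1], [2, 1], [1, 2]], np.float32)
assert _check_affine(_src, _dst)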
| 108 |
'''simple docstring'''
import os
import pytest
from transformers.dynamic_module_utils import get_imports
TOP_LEVEL_IMPORT = '''
import os
'''
IMPORT_IN_FUNCTION = '''
def foo():
import os
return False
'''
DEEPLY_NESTED_IMPORT = '''
def foo():
def bar():
if True:
import os
return False
return bar()
'''
TOP_LEVEL_TRY_IMPORT = '''
import os
try:
import bar
except ImportError:
raise ValueError()
'''
TRY_IMPORT_IN_FUNCTION = '''
import os
def foo():
try:
import bar
except ImportError:
raise ValueError()
'''
MULTIPLE_EXCEPTS_IMPORT = '''
import os
try:
import bar
except (ImportError, AttributeError):
raise ValueError()
'''
EXCEPT_AS_IMPORT = '''
import os
try:
import bar
except ImportError as e:
raise ValueError()
'''
GENERIC_EXCEPT_IMPORT = '''
import os
try:
import bar
except:
raise ValueError()
'''
MULTILINE_TRY_IMPORT = '''
import os
try:
import bar
import baz
except ImportError:
raise ValueError()
'''
MULTILINE_BOTH_IMPORT = '''
import os
try:
import bar
import baz
except ImportError:
x = 1
raise ValueError()
'''
CASES = [
TOP_LEVEL_IMPORT,
IMPORT_IN_FUNCTION,
DEEPLY_NESTED_IMPORT,
TOP_LEVEL_TRY_IMPORT,
GENERIC_EXCEPT_IMPORT,
MULTILINE_TRY_IMPORT,
MULTILINE_BOTH_IMPORT,
MULTIPLE_EXCEPTS_IMPORT,
EXCEPT_AS_IMPORT,
TRY_IMPORT_IN_FUNCTION,
]
@pytest.mark.parametrize('''case''' , CASES )
def test_import_parsing(tmp_path , case ):
    tmp_file_path = os.path.join(tmp_path , '''test_file.py''' )
    with open(tmp_file_path , '''w''' ) as _tmp_file:
        _tmp_file.write(case )
    parsed_imports = get_imports(tmp_file_path )
    assert parsed_imports == ["os"]
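
# --- Illustrative sketch (added for clarity; NOT the transformers implementation) ---
# A simplified way to list imported module roots with the standard library. The
# real `get_imports` additionally has to ignore imports guarded by try/except,
# which is exactly what the cases above exercise.
import ast


def list_import_roots(source):
    roots = set()
    for node in ast.walk(ast.parse(source)):
        if isinstance(node, ast.Import):
            roots.update(alias.name.split(".")[0] for alias in node.names)
        elif isinstance(node, ast.ImportFrom) and node.module:
            roots.add(node.module.split(".")[0])
    return sorted(roots)


assert list_import_roots(TOP_LEVEL_IMPORT) == ["os"]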
| 319 | 0 |
"""simple docstring"""
import tempfile
import unittest
import numpy as np
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax
if is_flax_available():
import os
from flax.core.frozen_dict import unfreeze
from flax.traverse_util import flatten_dict
from transformers import FlaxBertModel
    os.environ["XLA_PYTHON_CLIENT_MEM_FRACTION"] = "0.12"  # assumed parallelism: 8
@require_flax
@is_staging_test
class FlaxModelPushToHubTester(unittest.TestCase ):
@classmethod
    def setUpClass(cls ):
'''simple docstring'''
        cls._token = TOKEN
        HfFolder.save_token(TOKEN )
@classmethod
    def tearDownClass(cls ):
'''simple docstring'''
try:
delete_repo(token=cls._token , repo_id="""test-model-flax""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""valid_org/test-model-flax-org""" )
except HTTPError:
pass
def SCREAMING_SNAKE_CASE ( self ) -> List[str]:
'''simple docstring'''
UpperCAmelCase : Optional[Any] = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
UpperCAmelCase : Dict = FlaxBertModel(_SCREAMING_SNAKE_CASE )
model.push_to_hub("""test-model-flax""" , use_auth_token=self._token )
UpperCAmelCase : List[Any] = FlaxBertModel.from_pretrained(F"{USER}/test-model-flax" )
UpperCAmelCase : Union[str, Any] = flatten_dict(unfreeze(model.params ) )
UpperCAmelCase : Any = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
UpperCAmelCase : Union[str, Any] = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(_SCREAMING_SNAKE_CASE , 1E-3 , msg=F"{key} not identical" )
# Reset repo
delete_repo(token=self._token , repo_id="""test-model-flax""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(_SCREAMING_SNAKE_CASE , repo_id="""test-model-flax""" , push_to_hub=_SCREAMING_SNAKE_CASE , use_auth_token=self._token )
UpperCAmelCase : str = FlaxBertModel.from_pretrained(F"{USER}/test-model-flax" )
UpperCAmelCase : int = flatten_dict(unfreeze(model.params ) )
UpperCAmelCase : Dict = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
UpperCAmelCase : Tuple = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(_SCREAMING_SNAKE_CASE , 1E-3 , msg=F"{key} not identical" )
def SCREAMING_SNAKE_CASE ( self ) -> List[str]:
'''simple docstring'''
UpperCAmelCase : str = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
UpperCAmelCase : Optional[Any] = FlaxBertModel(_SCREAMING_SNAKE_CASE )
model.push_to_hub("""valid_org/test-model-flax-org""" , use_auth_token=self._token )
UpperCAmelCase : List[str] = FlaxBertModel.from_pretrained("""valid_org/test-model-flax-org""" )
UpperCAmelCase : str = flatten_dict(unfreeze(model.params ) )
UpperCAmelCase : Optional[Any] = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
UpperCAmelCase : List[Any] = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(_SCREAMING_SNAKE_CASE , 1E-3 , msg=F"{key} not identical" )
# Reset repo
delete_repo(token=self._token , repo_id="""valid_org/test-model-flax-org""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(
_SCREAMING_SNAKE_CASE , repo_id="""valid_org/test-model-flax-org""" , push_to_hub=_SCREAMING_SNAKE_CASE , use_auth_token=self._token )
UpperCAmelCase : Tuple = FlaxBertModel.from_pretrained("""valid_org/test-model-flax-org""" )
UpperCAmelCase : Dict = flatten_dict(unfreeze(model.params ) )
UpperCAmelCase : int = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
UpperCAmelCase : Dict = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(_SCREAMING_SNAKE_CASE , 1E-3 , msg=F"{key} not identical" )
def check_models_equal(model_1 , model_2 ):
    models_are_equal = True
    flat_params_1 = flatten_dict(model_1.params )
    flat_params_2 = flatten_dict(model_2.params )
    for key in flat_params_1.keys():
        if np.sum(np.abs(flat_params_1[key] - flat_params_2[key] ) ) > 1e-4:
            models_are_equal = False
    return models_are_equal
@require_flax
class FlaxModelUtilsTest(unittest.TestCase ):
def SCREAMING_SNAKE_CASE ( self ) -> Any:
'''simple docstring'''
UpperCAmelCase : List[str] = BertConfig.from_pretrained("""hf-internal-testing/tiny-bert-flax-only""" )
UpperCAmelCase : int = FlaxBertModel(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : List[Any] = """bert"""
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
with self.assertRaises(_SCREAMING_SNAKE_CASE ):
UpperCAmelCase : Optional[int] = FlaxBertModel.from_pretrained(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : Union[str, Any] = FlaxBertModel.from_pretrained(_SCREAMING_SNAKE_CASE , subfolder=_SCREAMING_SNAKE_CASE )
self.assertTrue(check_models_equal(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
def SCREAMING_SNAKE_CASE ( self ) -> Any:
'''simple docstring'''
UpperCAmelCase : Dict = BertConfig.from_pretrained("""hf-internal-testing/tiny-bert-flax-only""" )
UpperCAmelCase : Tuple = FlaxBertModel(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : Union[str, Any] = """bert"""
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , max_shard_size="""10KB""" )
with self.assertRaises(_SCREAMING_SNAKE_CASE ):
UpperCAmelCase : Union[str, Any] = FlaxBertModel.from_pretrained(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : Optional[int] = FlaxBertModel.from_pretrained(_SCREAMING_SNAKE_CASE , subfolder=_SCREAMING_SNAKE_CASE )
self.assertTrue(check_models_equal(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
def SCREAMING_SNAKE_CASE ( self ) -> List[str]:
'''simple docstring'''
UpperCAmelCase : int = """bert"""
UpperCAmelCase : Union[str, Any] = """hf-internal-testing/tiny-random-bert-subfolder"""
with self.assertRaises(_SCREAMING_SNAKE_CASE ):
UpperCAmelCase : Optional[Any] = FlaxBertModel.from_pretrained(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : Optional[int] = FlaxBertModel.from_pretrained(_SCREAMING_SNAKE_CASE , subfolder=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self ) -> Any:
'''simple docstring'''
UpperCAmelCase : Tuple = """bert"""
UpperCAmelCase : Dict = """hf-internal-testing/tiny-random-bert-sharded-subfolder"""
with self.assertRaises(_SCREAMING_SNAKE_CASE ):
UpperCAmelCase : Union[str, Any] = FlaxBertModel.from_pretrained(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : Optional[int] = FlaxBertModel.from_pretrained(_SCREAMING_SNAKE_CASE , subfolder=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
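
# --- Illustrative sketch (added for clarity, not part of the original tests) ---
# `flatten_dict` from flax.traverse_util is what makes the per-key parameter
# comparisons above possible: it turns a nested params tree into a flat dict
# keyed by tuples of path components.
def _flatten_dict_example():
    nested = {"layer": {"kernel": 1, "bias": 2}}
    assert flatten_dict(nested) == {("layer", "kernel"): 1, ("layer", "bias"): 2}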
| 109 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
BertTokenizer,
ViltConfig,
ViltForImageAndTextRetrieval,
ViltForImagesAndTextClassification,
ViltForMaskedLM,
ViltForQuestionAnswering,
ViltImageProcessor,
ViltProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
UpperCamelCase = logging.get_logger(__name__)
def create_rename_keys(config , vqa_model=False , nlvr_model=False , irtr_model=False ):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"""transformer.blocks.{i}.norm1.weight""", F"""vilt.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""transformer.blocks.{i}.norm1.bias""", F"""vilt.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append(
(F"""transformer.blocks.{i}.attn.proj.weight""", F"""vilt.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append(
(F"""transformer.blocks.{i}.attn.proj.bias""", F"""vilt.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((F"""transformer.blocks.{i}.norm2.weight""", F"""vilt.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""transformer.blocks.{i}.norm2.bias""", F"""vilt.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append(
(F"""transformer.blocks.{i}.mlp.fc1.weight""", F"""vilt.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((F"""transformer.blocks.{i}.mlp.fc1.bias""", F"""vilt.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((F"""transformer.blocks.{i}.mlp.fc2.weight""", F"""vilt.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""transformer.blocks.{i}.mlp.fc2.bias""", F"""vilt.encoder.layer.{i}.output.dense.bias""") )
# embeddings
rename_keys.extend(
[
# text embeddings
('''text_embeddings.word_embeddings.weight''', '''vilt.embeddings.text_embeddings.word_embeddings.weight'''),
(
'''text_embeddings.position_embeddings.weight''',
'''vilt.embeddings.text_embeddings.position_embeddings.weight''',
),
('''text_embeddings.position_ids''', '''vilt.embeddings.text_embeddings.position_ids'''),
(
'''text_embeddings.token_type_embeddings.weight''',
'''vilt.embeddings.text_embeddings.token_type_embeddings.weight''',
),
('''text_embeddings.LayerNorm.weight''', '''vilt.embeddings.text_embeddings.LayerNorm.weight'''),
('''text_embeddings.LayerNorm.bias''', '''vilt.embeddings.text_embeddings.LayerNorm.bias'''),
# patch embeddings
('''transformer.cls_token''', '''vilt.embeddings.cls_token'''),
('''transformer.patch_embed.proj.weight''', '''vilt.embeddings.patch_embeddings.projection.weight'''),
('''transformer.patch_embed.proj.bias''', '''vilt.embeddings.patch_embeddings.projection.bias'''),
('''transformer.pos_embed''', '''vilt.embeddings.position_embeddings'''),
# token type embeddings
('''token_type_embeddings.weight''', '''vilt.embeddings.token_type_embeddings.weight'''),
] )
# final layernorm + pooler
rename_keys.extend(
[
('''transformer.norm.weight''', '''vilt.layernorm.weight'''),
('''transformer.norm.bias''', '''vilt.layernorm.bias'''),
('''pooler.dense.weight''', '''vilt.pooler.dense.weight'''),
('''pooler.dense.bias''', '''vilt.pooler.dense.bias'''),
] )
# classifier head(s)
if vqa_model:
# classification head
rename_keys.extend(
[
('''vqa_classifier.0.weight''', '''classifier.0.weight'''),
('''vqa_classifier.0.bias''', '''classifier.0.bias'''),
('''vqa_classifier.1.weight''', '''classifier.1.weight'''),
('''vqa_classifier.1.bias''', '''classifier.1.bias'''),
('''vqa_classifier.3.weight''', '''classifier.3.weight'''),
('''vqa_classifier.3.bias''', '''classifier.3.bias'''),
] )
elif nlvr_model:
# classification head
rename_keys.extend(
[
('''nlvr2_classifier.0.weight''', '''classifier.0.weight'''),
('''nlvr2_classifier.0.bias''', '''classifier.0.bias'''),
('''nlvr2_classifier.1.weight''', '''classifier.1.weight'''),
('''nlvr2_classifier.1.bias''', '''classifier.1.bias'''),
('''nlvr2_classifier.3.weight''', '''classifier.3.weight'''),
('''nlvr2_classifier.3.bias''', '''classifier.3.bias'''),
] )
else:
pass
return rename_keys
def read_in_q_k_v(state_dict , config ):
for i in range(config.num_hidden_layers ):
A: Tuple = '''vilt.'''
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
A: List[str] = state_dict.pop(F"""transformer.blocks.{i}.attn.qkv.weight""" )
A: Optional[Any] = state_dict.pop(F"""transformer.blocks.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
A: Dict = in_proj_weight[
: config.hidden_size, :
]
A: int = in_proj_bias[: config.hidden_size]
A: Any = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
A: int = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
A: Optional[int] = in_proj_weight[
-config.hidden_size :, :
]
A: Optional[Any] = in_proj_bias[-config.hidden_size :]
def SCREAMING_SNAKE_CASE( __lowercase ) -> int:
A: Optional[int] = ['''head.weight''', '''head.bias''']
for k in ignore_keys:
state_dict.pop(__lowercase , __lowercase )
def rename_key(dct , old , new ):
    val = dct.pop(old )
    dct[new] = val
@torch.no_grad()
def convert_vilt_checkpoint(checkpoint_url , pytorch_dump_folder_path ):
A: Optional[Any] = ViltConfig(image_size=3_8_4 , patch_size=3_2 , tie_word_embeddings=__lowercase )
A: Tuple = False
A: str = False
A: List[Any] = False
A: Optional[int] = False
if "vqa" in checkpoint_url:
A: Union[str, Any] = True
A: Union[str, Any] = 3_1_2_9
A: List[Any] = '''huggingface/label-files'''
A: Any = '''vqa2-id2label.json'''
A: Optional[Any] = json.load(open(hf_hub_download(__lowercase , __lowercase , repo_type='''dataset''' ) , '''r''' ) )
A: Union[str, Any] = {int(__lowercase ): v for k, v in idalabel.items()}
A: Any = idalabel
A: Optional[Any] = {v: k for k, v in idalabel.items()}
A: List[str] = ViltForQuestionAnswering(__lowercase )
elif "nlvr" in checkpoint_url:
A: Dict = True
A: str = 2
A: Union[str, Any] = {0: '''False''', 1: '''True'''}
A: Any = {v: k for k, v in config.idalabel.items()}
A: Optional[Any] = 3
A: Any = ViltForImagesAndTextClassification(__lowercase )
elif "irtr" in checkpoint_url:
A: Tuple = True
A: Optional[Any] = ViltForImageAndTextRetrieval(__lowercase )
elif "mlm_itm" in checkpoint_url:
A: Tuple = True
A: Optional[int] = ViltForMaskedLM(__lowercase )
else:
raise ValueError('''Unknown model type''' )
# load state_dict of original model, remove and rename some keys
A: int = torch.hub.load_state_dict_from_url(__lowercase , map_location='''cpu''' )['''state_dict''']
A: List[str] = create_rename_keys(__lowercase , __lowercase , __lowercase , __lowercase )
for src, dest in rename_keys:
rename_key(__lowercase , __lowercase , __lowercase )
read_in_q_k_v(__lowercase , __lowercase )
if mlm_model or irtr_model:
A: str = ['''itm_score.fc.weight''', '''itm_score.fc.bias''']
for k in ignore_keys:
state_dict.pop(__lowercase , __lowercase )
# load state dict into HuggingFace model
model.eval()
if mlm_model:
        missing_keys, unexpected_keys = model.load_state_dict(state_dict , strict=False )
        assert missing_keys == ["mlm_score.decoder.bias"]
else:
model.load_state_dict(__lowercase )
# Define processor
A: Optional[Any] = ViltImageProcessor(size=3_8_4 )
A: Dict = BertTokenizer.from_pretrained('''bert-base-uncased''' )
A: Optional[int] = ViltProcessor(__lowercase , __lowercase )
# Forward pass on example inputs (image + text)
if nlvr_model:
A: str = Image.open(requests.get('''https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg''' , stream=__lowercase ).raw )
A: List[str] = Image.open(requests.get('''https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg''' , stream=__lowercase ).raw )
A: Any = (
'''The left image contains twice the number of dogs as the right image, and at least two dogs in total are'''
''' standing.'''
)
A: List[Any] = processor(__lowercase , __lowercase , return_tensors='''pt''' )
A: List[Any] = processor(__lowercase , __lowercase , return_tensors='''pt''' )
A: List[str] = model(
input_ids=encoding_a.input_ids , pixel_values=encoding_a.pixel_values , pixel_values_a=encoding_a.pixel_values , )
else:
A: Any = Image.open(requests.get('''http://images.cocodataset.org/val2017/000000039769.jpg''' , stream=__lowercase ).raw )
if mlm_model:
A: Optional[int] = '''a bunch of [MASK] laying on a [MASK].'''
else:
A: Optional[int] = '''How many cats are there?'''
A: Union[str, Any] = processor(__lowercase , __lowercase , return_tensors='''pt''' )
A: Any = model(**__lowercase )
# Verify outputs
if mlm_model:
        expected_shape = torch.Size([1, 1_1, 3_0_5_2_2] )
        expected_slice = torch.tensor([-1_2.5_0_6_1, -1_2.5_1_2_3, -1_2.5_1_7_4] )
        assert outputs.logits.shape == expected_shape
        assert torch.allclose(outputs.logits[0, 0, :3] , expected_slice , atol=1E-4 )
        # verify masked token prediction equals "cats"
        predicted_id = outputs.logits[0, 4, :].argmax(-1 ).item()
        assert tokenizer.decode([predicted_id] ) == "cats"
elif vqa_model:
        expected_shape = torch.Size([1, 3_1_2_9] )
        expected_slice = torch.tensor([-1_5.9_4_9_5, -1_8.1_4_7_2, -1_0.3_0_4_1] )
        assert outputs.logits.shape == expected_shape
        assert torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1E-4 )
        # verify vqa prediction equals "2"
        predicted_idx = outputs.logits.argmax(-1 ).item()
        assert model.config.id2label[predicted_idx] == "2"
elif nlvr_model:
        expected_shape = torch.Size([1, 2] )
        expected_slice = torch.tensor([-2.8_7_2_1, 2.1_2_9_1] )
        assert outputs.logits.shape == expected_shape
        assert torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1E-4 )
Path(__lowercase ).mkdir(exist_ok=__lowercase )
print(F"""Saving model and processor to {pytorch_dump_folder_path}""" )
model.save_pretrained(__lowercase )
processor.save_pretrained(__lowercase )
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt''',
type=str,
help='''URL of the checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
UpperCamelCase = parser.parse_args()
convert_vilt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
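
# --- Illustrative sketch (added for clarity, not part of the conversion) ---
# `read_in_q_k_v` above slices timm's fused qkv projection, of shape
# (3 * hidden_size, hidden_size), into separate query/key/value matrices:
def _split_qkv(in_proj_weight, hidden_size):
    query = in_proj_weight[:hidden_size, :]
    key = in_proj_weight[hidden_size : hidden_size * 2, :]
    value = in_proj_weight[-hidden_size:, :]
    # the three slices tile the fused matrix exactly
    assert torch.equal(torch.cat([query, key, value], dim=0), in_proj_weight)
    return query, key, value


_w = torch.arange(12.0).reshape(6, 2)  # tiny fused qkv for hidden_size=2
_q, _k, _v = _split_qkv(_w, hidden_size=2)
assert _q.shape == _k.shape == _v.shape == (2, 2)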
| 319 | 0 |
import secrets
from random import shuffle
from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation
def password_generator(length=8):
    chars = ascii_letters + digits + punctuation
    return "".join(secrets.choice(chars) for _ in range(length))


def alternative_password_generator(chars_incl, i):
    i -= len(chars_incl)
    quotient = i // 3
    remainder = i % 3
    # chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) +
    # random_number(digits, i / 3) + random_characters(punctuation, i / 3)
    chars = (
        chars_incl
        + random(ascii_letters, quotient + remainder)
        + random(digits, quotient)
        + random(punctuation, quotient)
    )
    list_of_chars = list(chars)
    shuffle(list_of_chars)
    return "".join(list_of_chars)


# random is a generalised function for letters, characters and numbers
def random(ctbl, i):
    return "".join(secrets.choice(ctbl) for _ in range(i))


def random_number(ctbl, i):
    pass  # Put your code here...


def random_letters(ctbl, i):
    pass  # Put your code here...


def random_characters(ctbl, i):
    pass  # Put your code here...


def is_strong_password(password, min_length=8):
    if len(password) < min_length:
        # Your Password must be at least 8 characters long
        return False
    upper = any(char in ascii_uppercase for char in password)
    lower = any(char in ascii_lowercase for char in password)
    num = any(char in digits for char in password)
    spec_char = any(char in punctuation for char in password)
    return upper and lower and num and spec_char


# Passwords should contain UPPERCASE, lowercase,
# numbers, and special characters
def main():
    length = int(input("Please indicate the max length of your password: ").strip())
    chars_incl = input("Please indicate the characters that must be in your password: ").strip()
    print("Password generated:", password_generator(length))
    print(
        "Alternative Password generated:",
        alternative_password_generator(chars_incl, length),
    )
    print("[If you are thinking of using this password, You better save it.]")
if __name__ == "__main__":
main()
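
# --- Illustrative sketch (assumption): possible bodies for the three stub helpers
# above, mirroring the generic `random()` helper. The original deliberately leaves
# them as exercises ("Put your code here..."); these underscore-prefixed versions
# are one plausible way to fill them in.
def _random_number(ctbl, i):
    # draw `i` characters from the digit table
    return "".join(secrets.choice(digits) for _ in range(int(i)))


def _random_letters(ctbl, i):
    # draw `i` characters from the supplied letter table (e.g. ascii_letters)
    return "".join(secrets.choice(ctbl) for _ in range(int(i)))


def _random_characters(ctbl, i):
    # draw `i` characters from the punctuation table
    return "".join(secrets.choice(punctuation) for _ in range(int(i)))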
| 82 |
'''simple docstring'''
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
model_classes = {
    '''b0''': efficientnet.EfficientNetB0,
    '''b1''': efficientnet.EfficientNetB1,
    '''b2''': efficientnet.EfficientNetB2,
    '''b3''': efficientnet.EfficientNetB3,
    '''b4''': efficientnet.EfficientNetB4,
    '''b5''': efficientnet.EfficientNetB5,
    '''b6''': efficientnet.EfficientNetB6,
    '''b7''': efficientnet.EfficientNetB7,
}
CONFIG_MAP = {
'''b0''': {
'''hidden_dim''': 1280,
'''width_coef''': 1.0,
'''depth_coef''': 1.0,
'''image_size''': 224,
'''dropout_rate''': 0.2,
'''dw_padding''': [],
},
'''b1''': {
'''hidden_dim''': 1280,
'''width_coef''': 1.0,
'''depth_coef''': 1.1,
'''image_size''': 240,
'''dropout_rate''': 0.2,
'''dw_padding''': [16],
},
'''b2''': {
'''hidden_dim''': 1408,
'''width_coef''': 1.1,
'''depth_coef''': 1.2,
'''image_size''': 260,
'''dropout_rate''': 0.3,
'''dw_padding''': [5, 8, 16],
},
'''b3''': {
'''hidden_dim''': 1536,
'''width_coef''': 1.2,
'''depth_coef''': 1.4,
'''image_size''': 300,
'''dropout_rate''': 0.3,
'''dw_padding''': [5, 18],
},
'''b4''': {
'''hidden_dim''': 1792,
'''width_coef''': 1.4,
'''depth_coef''': 1.8,
'''image_size''': 380,
'''dropout_rate''': 0.4,
'''dw_padding''': [6],
},
'''b5''': {
'''hidden_dim''': 2048,
'''width_coef''': 1.6,
'''depth_coef''': 2.2,
'''image_size''': 456,
'''dropout_rate''': 0.4,
'''dw_padding''': [13, 27],
},
'''b6''': {
'''hidden_dim''': 2304,
'''width_coef''': 1.8,
'''depth_coef''': 2.6,
'''image_size''': 528,
'''dropout_rate''': 0.5,
'''dw_padding''': [31],
},
'''b7''': {
'''hidden_dim''': 2560,
'''width_coef''': 2.0,
'''depth_coef''': 3.1,
'''image_size''': 600,
'''dropout_rate''': 0.5,
'''dw_padding''': [18],
},
}
def get_efficientnet_config(model_name ):
    config = EfficientNetConfig()
    config.hidden_dim = CONFIG_MAP[model_name]['''hidden_dim''']
    config.width_coefficient = CONFIG_MAP[model_name]['''width_coef''']
    config.depth_coefficient = CONFIG_MAP[model_name]['''depth_coef''']
    config.image_size = CONFIG_MAP[model_name]['''image_size''']
    config.dropout_rate = CONFIG_MAP[model_name]['''dropout_rate''']
    config.depthwise_padding = CONFIG_MAP[model_name]['''dw_padding''']
    repo_id = '''huggingface/label-files'''
    filename = '''imagenet-1k-id2label.json'''
    config.num_labels = 1_0_0_0
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type='''dataset''' ) , '''r''' ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def prepare_img():
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
def convert_image_processor(model_name ):
    size = CONFIG_MAP[model_name]['''image_size''']
    preprocessor = EfficientNetImageProcessor(
        size={'''height''': size, '''width''': size} , image_mean=[0.4_8_5, 0.4_5_6, 0.4_0_6] , image_std=[0.4_7_8_5_3_9_4_4, 0.4_7_3_2_8_6_4, 0.4_7_4_3_4_1_6_3] , do_center_crop=False , )
    return preprocessor
def rename_keys(original_param_names ):
    block_names = [v.split('''_''' )[0].split('''block''' )[1] for v in original_param_names if v.startswith('''block''' )]
    block_names = sorted(set(block_names ) )
    num_blocks = len(block_names )
    block_name_mapping = {b: str(i ) for b, i in zip(block_names , range(num_blocks ) )}
    rename_keys = []
rename_keys.append(('''stem_conv/kernel:0''', '''embeddings.convolution.weight''') )
rename_keys.append(('''stem_bn/gamma:0''', '''embeddings.batchnorm.weight''') )
rename_keys.append(('''stem_bn/beta:0''', '''embeddings.batchnorm.bias''') )
rename_keys.append(('''stem_bn/moving_mean:0''', '''embeddings.batchnorm.running_mean''') )
rename_keys.append(('''stem_bn/moving_variance:0''', '''embeddings.batchnorm.running_var''') )
for b in block_names:
A: int = block_name_mapping[b]
rename_keys.append((F"""block{b}_expand_conv/kernel:0""", F"""encoder.blocks.{hf_b}.expansion.expand_conv.weight""") )
rename_keys.append((F"""block{b}_expand_bn/gamma:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.weight""") )
rename_keys.append((F"""block{b}_expand_bn/beta:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.bias""") )
rename_keys.append(
(F"""block{b}_expand_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.running_mean""") )
rename_keys.append(
(F"""block{b}_expand_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.running_var""") )
rename_keys.append(
(F"""block{b}_dwconv/depthwise_kernel:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight""") )
rename_keys.append((F"""block{b}_bn/gamma:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight""") )
rename_keys.append((F"""block{b}_bn/beta:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias""") )
rename_keys.append(
(F"""block{b}_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean""") )
rename_keys.append(
(F"""block{b}_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var""") )
rename_keys.append((F"""block{b}_se_reduce/kernel:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.reduce.weight""") )
rename_keys.append((F"""block{b}_se_reduce/bias:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.reduce.bias""") )
rename_keys.append((F"""block{b}_se_expand/kernel:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.expand.weight""") )
rename_keys.append((F"""block{b}_se_expand/bias:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.expand.bias""") )
rename_keys.append(
(F"""block{b}_project_conv/kernel:0""", F"""encoder.blocks.{hf_b}.projection.project_conv.weight""") )
rename_keys.append((F"""block{b}_project_bn/gamma:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.weight""") )
rename_keys.append((F"""block{b}_project_bn/beta:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.bias""") )
rename_keys.append(
(F"""block{b}_project_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.running_mean""") )
rename_keys.append(
(F"""block{b}_project_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.running_var""") )
rename_keys.append(('''top_conv/kernel:0''', '''encoder.top_conv.weight''') )
rename_keys.append(('''top_bn/gamma:0''', '''encoder.top_bn.weight''') )
rename_keys.append(('''top_bn/beta:0''', '''encoder.top_bn.bias''') )
rename_keys.append(('''top_bn/moving_mean:0''', '''encoder.top_bn.running_mean''') )
rename_keys.append(('''top_bn/moving_variance:0''', '''encoder.top_bn.running_var''') )
    key_mapping = {}
    for item in rename_keys:
        if item[0] in original_param_names:
            key_mapping[item[0]] = '''efficientnet.''' + item[1]
    key_mapping['''predictions/kernel:0'''] = '''classifier.weight'''
    key_mapping['''predictions/bias:0'''] = '''classifier.bias'''
    return key_mapping
def replace_params(hf_params , tf_params , key_mapping ):
    for key, value in tf_params.items():
        if "normalization" in key:
            continue
        hf_key = key_mapping[key]
        if "_conv" in key and "kernel" in key:
            new_hf_value = torch.from_numpy(value ).permute(3 , 2 , 0 , 1 )
        elif "depthwise_kernel" in key:
            new_hf_value = torch.from_numpy(value ).permute(2 , 3 , 0 , 1 )
        elif "kernel" in key:
            new_hf_value = torch.from_numpy(np.transpose(value ) )
        else:
            new_hf_value = torch.from_numpy(value )
        # Replace HF parameters with original TF model parameters
        assert hf_params[hf_key].shape == new_hf_value.shape
        hf_params[hf_key].copy_(new_hf_value )
@torch.no_grad()
def convert_efficientnet_checkpoint(model_name , pytorch_dump_folder_path , save_model , push_to_hub ):
A: Optional[int] = model_classes[model_name](
include_top=__lowercase , weights='''imagenet''' , input_tensor=__lowercase , input_shape=__lowercase , pooling=__lowercase , classes=1_0_0_0 , classifier_activation='''softmax''' , )
A: List[str] = original_model.trainable_variables
A: Optional[Any] = original_model.non_trainable_variables
A: Union[str, Any] = {param.name: param.numpy() for param in tf_params}
for param in tf_non_train_params:
A: int = param.numpy()
A: Tuple = list(tf_params.keys() )
# Load HuggingFace model
A: Dict = get_efficientnet_config(__lowercase )
A: Union[str, Any] = EfficientNetForImageClassification(__lowercase ).eval()
A: Dict = hf_model.state_dict()
# Create src-to-dst parameter name mapping dictionary
print('''Converting parameters...''' )
A: int = rename_keys(__lowercase )
replace_params(__lowercase , __lowercase , __lowercase )
# Initialize preprocessor and preprocess input image
A: List[Any] = convert_image_processor(__lowercase )
A: Optional[Any] = preprocessor(images=prepare_img() , return_tensors='''pt''' )
# HF model inference
hf_model.eval()
with torch.no_grad():
A: str = hf_model(**__lowercase )
A: List[Any] = outputs.logits.detach().numpy()
# Original model inference
A: Any = False
A: List[Any] = CONFIG_MAP[model_name]['''image_size''']
A: List[Any] = prepare_img().resize((image_size, image_size) , resample=PIL.Image.NEAREST )
A: str = image.img_to_array(__lowercase )
A: Dict = np.expand_dims(__lowercase , axis=0 )
A: Any = original_model.predict(__lowercase )
# Check whether original and HF model outputs match -> np.allclose
assert np.allclose(__lowercase , __lowercase , atol=1E-3 ), "The predicted logits are not the same."
print('''Model outputs match!''' )
if save_model:
# Create folder to save model
if not os.path.isdir(__lowercase ):
os.mkdir(__lowercase )
# Save converted model and image processor
hf_model.save_pretrained(__lowercase )
preprocessor.save_pretrained(__lowercase )
if push_to_hub:
# Push model and image processor to hub
print(F"""Pushing converted {model_name} to the hub...""" )
A: int = F"""efficientnet-{model_name}"""
preprocessor.push_to_hub(__lowercase )
hf_model.push_to_hub(__lowercase )
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''b0''',
type=str,
help='''Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''hf_model''',
type=str,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument('''--save_model''', action='''store_true''', help='''Save model to local''')
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Push model and image processor to the hub''')
UpperCamelCase = parser.parse_args()
convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
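# --- illustrative aside, not part of the conversion script above ---
# A minimal, self-contained sketch (names assumed) of the kernel layout
# change the converter relies on: TensorFlow stores conv kernels as HWIO
# (height, width, in, out) while PyTorch expects OIHW, hence
# permute(3, 2, 0, 1); depthwise kernels map from (H, W, C, mult) to
# (C * mult, 1, H, W) via permute(2, 3, 0, 1).
def _demo_kernel_layouts():
    import numpy as np
    import torch

    tf_kernel = np.random.rand(3, 3, 16, 32).astype(np.float32)  # H, W, in, out
    pt_kernel = torch.from_numpy(tf_kernel).permute(3, 2, 0, 1)  # out, in, H, W
    assert pt_kernel.shape == (32, 16, 3, 3)

    tf_dw = np.random.rand(3, 3, 16, 1).astype(np.float32)       # depthwise HWCM
    pt_dw = torch.from_numpy(tf_dw).permute(2, 3, 0, 1)
    assert pt_dw.shape == (16, 1, 3, 3)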
| 319 | 0 |
'''simple docstring'''
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import is_speech_available, is_vision_available
from transformers.testing_utils import require_torch
if is_vision_available():
from transformers import TvltImageProcessor
if is_speech_available():
from transformers import TvltFeatureExtractor
from transformers import TvltProcessor
@require_torch
class lowerCAmelCase_( unittest.TestCase ):
'''simple docstring'''
def UpperCAmelCase_ ( self ) -> int:
lowerCAmelCase__ : Optional[Any] = '''ZinengTang/tvlt-base'''
lowerCAmelCase__ : List[Any] = tempfile.mkdtemp()
def UpperCAmelCase_ ( self ,**__UpperCAmelCase ) -> str:
return TvltImageProcessor.from_pretrained(self.checkpoint ,**SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase_ ( self ,**__UpperCAmelCase ) -> Optional[Any]:
return TvltFeatureExtractor.from_pretrained(self.checkpoint ,**SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase_ ( self ) -> Optional[int]:
shutil.rmtree(self.tmpdirname )
def UpperCAmelCase_ ( self ) -> int:
lowerCAmelCase__ : List[str] = self.get_image_processor()
lowerCAmelCase__ : str = self.get_feature_extractor()
lowerCAmelCase__ : int = TvltProcessor(image_processor=SCREAMING_SNAKE_CASE_ ,feature_extractor=SCREAMING_SNAKE_CASE_ )
processor.save_pretrained(self.tmpdirname )
lowerCAmelCase__ : List[str] = TvltProcessor.from_pretrained(self.tmpdirname )
self.assertIsInstance(processor.feature_extractor ,SCREAMING_SNAKE_CASE_ )
self.assertIsInstance(processor.image_processor ,SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
lowerCAmelCase__ : int = self.get_image_processor()
lowerCAmelCase__ : Any = self.get_feature_extractor()
lowerCAmelCase__ : int = TvltProcessor(image_processor=SCREAMING_SNAKE_CASE_ ,feature_extractor=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : Any = np.ones([1_2000] )
lowerCAmelCase__ : Optional[Any] = feature_extractor(SCREAMING_SNAKE_CASE_ ,return_tensors="""np""" )
lowerCAmelCase__ : int = processor(audio=SCREAMING_SNAKE_CASE_ ,return_tensors="""np""" )
for key in audio_dict.keys():
self.assertAlmostEqual(audio_dict[key].sum() ,input_processor[key].sum() ,delta=1E-2 )
def UpperCAmelCase_ ( self ) -> Dict:
lowerCAmelCase__ : Optional[int] = self.get_image_processor()
lowerCAmelCase__ : List[Any] = self.get_feature_extractor()
lowerCAmelCase__ : List[Any] = TvltProcessor(image_processor=SCREAMING_SNAKE_CASE_ ,feature_extractor=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : Optional[Any] = np.ones([3, 224, 224] )
lowerCAmelCase__ : Optional[int] = image_processor(SCREAMING_SNAKE_CASE_ ,return_tensors="""np""" )
lowerCAmelCase__ : Any = processor(images=SCREAMING_SNAKE_CASE_ ,return_tensors="""np""" )
for key in image_dict.keys():
self.assertAlmostEqual(image_dict[key].sum() ,input_processor[key].sum() ,delta=1E-2 )
def UpperCAmelCase_ ( self ) -> Dict:
lowerCAmelCase__ : Union[str, Any] = self.get_image_processor()
lowerCAmelCase__ : Tuple = self.get_feature_extractor()
lowerCAmelCase__ : Any = TvltProcessor(image_processor=SCREAMING_SNAKE_CASE_ ,feature_extractor=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : Any = np.ones([1_2000] )
lowerCAmelCase__ : Dict = np.ones([3, 224, 224] )
lowerCAmelCase__ : Union[str, Any] = processor(audio=SCREAMING_SNAKE_CASE_ ,images=SCREAMING_SNAKE_CASE_ )
self.assertListEqual(list(inputs.keys() ) ,["""audio_values""", """audio_mask""", """pixel_values""", """pixel_mask"""] )
# test if it raises when no input is passed
with pytest.raises(SCREAMING_SNAKE_CASE_ ):
processor()
def UpperCAmelCase_ ( self ) -> Optional[Any]:
lowerCAmelCase__ : Union[str, Any] = self.get_image_processor()
lowerCAmelCase__ : List[str] = self.get_feature_extractor()
lowerCAmelCase__ : List[Any] = TvltProcessor(image_processor=SCREAMING_SNAKE_CASE_ ,feature_extractor=SCREAMING_SNAKE_CASE_ )
self.assertListEqual(
processor.model_input_names ,image_processor.model_input_names + feature_extractor.model_input_names ,msg="""`processor` and `image_processor`+`feature_extractor` model input names do not match""" ,)
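# --- illustrative aside ---
# A hedged usage sketch of the processor under test: TvltProcessor routes
# audio through the feature extractor and images through the image
# processor and merges both outputs into one dict. Downloading the
# checkpoint requires network access, so the demo is wrapped in a helper
# and not executed here.
def _demo_tvlt_processor():
    import numpy as np
    from transformers import TvltFeatureExtractor, TvltImageProcessor, TvltProcessor

    image_processor = TvltImageProcessor.from_pretrained("ZinengTang/tvlt-base")
    feature_extractor = TvltFeatureExtractor.from_pretrained("ZinengTang/tvlt-base")
    processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)

    inputs = processor(audio=np.ones([12000]), images=np.ones([3, 224, 224]))
    assert sorted(inputs.keys()) == ["audio_mask", "audio_values", "pixel_mask", "pixel_values"]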
| 37 |
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCamelCase = {'''configuration_focalnet''': ['''FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''FocalNetConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
'''FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FocalNetForImageClassification''',
'''FocalNetForMaskedImageModeling''',
'''FocalNetBackbone''',
'''FocalNetModel''',
'''FocalNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
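# --- illustrative aside ---
# A generic sketch of the lazy-import idea used above, independent of
# transformers' internal _LazyModule: a PEP 562 module-level __getattr__
# defers the heavy torch-backed import until an attribute is first
# touched, so importing the package stays cheap. Kept commented out since
# defining __getattr__ here would change this module's behaviour.
#
#     import importlib
#
#     _LAZY_ATTRS = {"FocalNetModel": ".modeling_focalnet"}
#
#     def __getattr__(name):
#         if name in _LAZY_ATTRS:
#             module = importlib.import_module(_LAZY_ATTRS[name], __package__)
#             return getattr(module, name)
#         raise AttributeError(f"module {__name__!r} has no attribute {name!r}")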
| 319 | 0 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ....tokenization_utils_fast import PreTrainedTokenizerFast
from ....utils import logging
from .tokenization_retribert import RetriBertTokenizer
SCREAMING_SNAKE_CASE_: List[Any] =logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_: List[Any] ={'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
SCREAMING_SNAKE_CASE_: Dict ={
'vocab_file': {
'yjernite/retribert-base-uncased': (
'https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'yjernite/retribert-base-uncased': (
'https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json'
),
},
}
SCREAMING_SNAKE_CASE_: Union[str, Any] ={
'yjernite/retribert-base-uncased': 5_12,
}
SCREAMING_SNAKE_CASE_: Any ={
'yjernite/retribert-base-uncased': {'do_lower_case': True},
}
class __A ( UpperCAmelCase_ ):
a__ : List[str] = VOCAB_FILES_NAMES
a__ : int = PRETRAINED_VOCAB_FILES_MAP
a__ : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a__ : List[Any] = PRETRAINED_INIT_CONFIGURATION
a__ : int = RetriBertTokenizer
a__ : int = ["""input_ids""", """attention_mask"""]
def __init__(self : Optional[int] , __a : Union[str, Any]=None , __a : List[Any]=None , __a : List[Any]=True , __a : int="[UNK]" , __a : Union[str, Any]="[SEP]" , __a : int="[PAD]" , __a : Dict="[CLS]" , __a : int="[MASK]" , __a : List[Any]=True , __a : List[str]=None , **__a : List[Any] , ):
super().__init__(
SCREAMING_SNAKE_CASE_ , tokenizer_file=SCREAMING_SNAKE_CASE_ , do_lower_case=SCREAMING_SNAKE_CASE_ , unk_token=SCREAMING_SNAKE_CASE_ , sep_token=SCREAMING_SNAKE_CASE_ , pad_token=SCREAMING_SNAKE_CASE_ , cls_token=SCREAMING_SNAKE_CASE_ , mask_token=SCREAMING_SNAKE_CASE_ , tokenize_chinese_chars=SCREAMING_SNAKE_CASE_ , strip_accents=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
UpperCAmelCase_ = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("lowercase" , SCREAMING_SNAKE_CASE_ ) != do_lower_case
or normalizer_state.get("strip_accents" , SCREAMING_SNAKE_CASE_ ) != strip_accents
or normalizer_state.get("handle_chinese_chars" , SCREAMING_SNAKE_CASE_ ) != tokenize_chinese_chars
):
UpperCAmelCase_ = getattr(SCREAMING_SNAKE_CASE_ , normalizer_state.pop("type" ) )
UpperCAmelCase_ = do_lower_case
UpperCAmelCase_ = strip_accents
UpperCAmelCase_ = tokenize_chinese_chars
UpperCAmelCase_ = normalizer_class(**SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_ = do_lower_case
def _lowercase (self : Any , __a : Tuple , __a : Optional[int]=None ):
UpperCAmelCase_ = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def _lowercase (self : List[str] , __a : List[int] , __a : Optional[List[int]] = None ):
UpperCAmelCase_ = [self.sep_token_id]
UpperCAmelCase_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def _lowercase (self : List[str] , __a : str , __a : Optional[str] = None ):
UpperCAmelCase_ = self._tokenizer.model.save(SCREAMING_SNAKE_CASE_ , name=SCREAMING_SNAKE_CASE_ )
return tuple(SCREAMING_SNAKE_CASE_ )
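# --- illustrative aside ---
# A small worked example of the BERT-style special-token layout the two
# methods above implement: [CLS] A [SEP] for a single sequence and
# [CLS] A [SEP] B [SEP] with segment ids 0/1 for a pair. Token ids below
# are made up for illustration.
def _demo_special_token_layout():
    cls_id, sep_id = 101, 102
    seq_a, seq_b = [7, 8, 9], [4, 5]

    pair = [cls_id] + seq_a + [sep_id] + seq_b + [sep_id]
    segment_ids = [0] * (len(seq_a) + 2) + [1] * (len(seq_b) + 1)
    assert len(pair) == len(segment_ids) == 8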
| 1 |
'''simple docstring'''
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = Dict[str, Any]
UpperCamelCase = List[Prediction]
@add_end_docstrings(UpperCAmelCase_ )
class lowerCAmelCase_ ( UpperCAmelCase_ ):
'''simple docstring'''
def __init__( self : Union[str, Any] , *SCREAMING_SNAKE_CASE_ : Union[str, Any] , **SCREAMING_SNAKE_CASE_ : List[str] ) -> int:
'''simple docstring'''
super().__init__(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
if self.framework == "tf":
raise ValueError(f"""The {self.__class__} is only available in PyTorch.""" )
requires_backends(self , '''vision''' )
self.check_model_type(
dict(MODEL_FOR_OBJECT_DETECTION_MAPPING.items() + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items() ) )
def _snake_case ( self : int , **SCREAMING_SNAKE_CASE_ : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
A: Any = {}
if "threshold" in kwargs:
A: List[Any] = kwargs['''threshold''']
return {}, {}, postprocess_kwargs
def __call__( self : str , *SCREAMING_SNAKE_CASE_ : str , **SCREAMING_SNAKE_CASE_ : Optional[Any] ) -> Union[Predictions, List[Prediction]]:
'''simple docstring'''
return super().__call__(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
def _snake_case ( self : Optional[int] , SCREAMING_SNAKE_CASE_ : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
A: int = load_image(SCREAMING_SNAKE_CASE_ )
A: Optional[Any] = torch.IntTensor([[image.height, image.width]] )
A: Union[str, Any] = self.image_processor(images=[image] , return_tensors='''pt''' )
if self.tokenizer is not None:
A: int = self.tokenizer(text=inputs['''words'''] , boxes=inputs['''boxes'''] , return_tensors='''pt''' )
A: Any = target_size
return inputs
def _snake_case ( self : int , SCREAMING_SNAKE_CASE_ : str ) -> List[Any]:
'''simple docstring'''
A: Tuple = model_inputs.pop('''target_size''' )
A: Tuple = self.model(**SCREAMING_SNAKE_CASE_ )
A: List[str] = outputs.__class__({'''target_size''': target_size, **outputs} )
if self.tokenizer is not None:
A: Dict = model_inputs['''bbox''']
return model_outputs
def _snake_case ( self : Any , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : str=0.9 ) -> Union[str, Any]:
'''simple docstring'''
A: List[Any] = model_outputs['''target_size''']
if self.tokenizer is not None:
# This is a LayoutLMForTokenClassification variant.
# The OCR got the boxes and the model classified the words.
A , A: Union[str, Any] = target_size[0].tolist()
def unnormalize(SCREAMING_SNAKE_CASE_ : str ):
return self._get_bounding_box(
torch.Tensor(
[
(width * bbox[0] / 10_00),
(height * bbox[1] / 10_00),
(width * bbox[2] / 10_00),
(height * bbox[3] / 10_00),
] ) )
A , A: Dict = model_outputs['''logits'''].squeeze(0 ).softmax(dim=-1 ).max(dim=-1 )
A: List[str] = [self.model.config.idalabel[prediction] for prediction in classes.tolist()]
A: List[str] = [unnormalize(SCREAMING_SNAKE_CASE_ ) for bbox in model_outputs['''bbox'''].squeeze(0 )]
A: Dict = ['''score''', '''label''', '''box''']
A: Optional[int] = [dict(zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) for vals in zip(scores.tolist() , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) if vals[0] > threshold]
else:
# This is a regular ForObjectDetectionModel
A: Any = self.image_processor.post_process_object_detection(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
A: List[str] = raw_annotations[0]
A: List[Any] = raw_annotation['''scores''']
A: List[Any] = raw_annotation['''labels''']
A: int = raw_annotation['''boxes''']
A: Any = scores.tolist()
A: List[Any] = [self.model.config.idalabel[label.item()] for label in labels]
A: List[Any] = [self._get_bounding_box(SCREAMING_SNAKE_CASE_ ) for box in boxes]
# {"scores": [...], ...} --> [{"score":x, ...}, ...]
A: Tuple = ['''score''', '''label''', '''box''']
A: str = [
dict(zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
for vals in zip(raw_annotation['''scores'''] , raw_annotation['''labels'''] , raw_annotation['''boxes'''] )
]
return annotation
def _snake_case ( self : Tuple , SCREAMING_SNAKE_CASE_ : "torch.Tensor" ) -> Dict[str, int]:
'''simple docstring'''
if self.framework != "pt":
raise ValueError('''The ObjectDetectionPipeline is only available in PyTorch.''' )
A , A , A , A: str = box.int().tolist()
A: str = {
'''xmin''': xmin,
'''ymin''': ymin,
'''xmax''': xmax,
'''ymax''': ymax,
}
return bbox
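# --- illustrative aside ---
# A standalone restatement of the `unnormalize` closure above:
# LayoutLM-style OCR boxes live on a 0-1000 grid and are scaled back to
# pixel coordinates before being reported.
def _demo_unnormalize_box(bbox, width, height):
    xmin = width * bbox[0] / 1000
    ymin = height * bbox[1] / 1000
    xmax = width * bbox[2] / 1000
    ymax = height * bbox[3] / 1000
    return {"xmin": int(xmin), "ymin": int(ymin), "xmax": int(xmax), "ymax": int(ymax)}

# e.g. _demo_unnormalize_box([0, 0, 500, 1000], width=800, height=600)
# -> {"xmin": 0, "ymin": 0, "xmax": 400, "ymax": 600}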
| 319 | 0 |
import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config
# DataLoaders built from `test_samples/MRPC` for quick testing
# Should mock `{script_name}.get_dataloaders` via:
# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders)
_SCREAMING_SNAKE_CASE = [
"""cross_validation.py""",
"""gradient_accumulation.py""",
"""local_sgd.py""",
"""multi_process_metrics.py""",
"""memory.py""",
"""automatic_gradient_accumulation.py""",
"""fsdp_with_peak_mem_tracking.py""",
"""deepspeed_with_config_support.py""",
"""megatron_lm_gpt_pretraining.py""",
]
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
def lowerCamelCase_ ( self : Optional[Any] , lowerCamelCase_ : str , lowerCamelCase_ : bool , lowerCamelCase_ : str = None , lowerCamelCase_ : list = None ):
"""simple docstring"""
UpperCamelCase = None
UpperCamelCase = os.path.abspath(os.path.join("""examples""" , """by_feature""" ) )
UpperCamelCase = os.path.abspath("""examples""" )
for item in os.listdir(SCREAMING_SNAKE_CASE_ ):
if item not in EXCLUDE_EXAMPLES:
UpperCamelCase = os.path.join(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
if os.path.isfile(SCREAMING_SNAKE_CASE_ ) and ".py" in item_path:
with self.subTest(
tested_script=SCREAMING_SNAKE_CASE_ , feature_script=SCREAMING_SNAKE_CASE_ , tested_section="""main()""" if parser_only else """training_function()""" , ):
UpperCamelCase = compare_against_test(
os.path.join(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
UpperCamelCase = '''\n'''.join(SCREAMING_SNAKE_CASE_ )
if special_strings is not None:
for string in special_strings:
UpperCamelCase = diff.replace(SCREAMING_SNAKE_CASE_ , """""" )
self.assertEqual(SCREAMING_SNAKE_CASE_ , """""" )
def lowerCamelCase_ ( self : Tuple ):
"""simple docstring"""
self.one_complete_example("""complete_nlp_example.py""" , SCREAMING_SNAKE_CASE_ )
self.one_complete_example("""complete_nlp_example.py""" , SCREAMING_SNAKE_CASE_ )
def lowerCamelCase_ ( self : str ):
"""simple docstring"""
UpperCamelCase = os.path.abspath(os.path.join("""examples""" , """cv_example.py""" ) )
UpperCamelCase = [
''' ''' * 16 + '''{\n\n''',
''' ''' * 20 + '''"accuracy": eval_metric["accuracy"],\n\n''',
''' ''' * 20 + '''"f1": eval_metric["f1"],\n\n''',
''' ''' * 20 + '''"train_loss": total_loss.item() / len(train_dataloader),\n\n''',
''' ''' * 20 + '''"epoch": epoch,\n\n''',
''' ''' * 16 + '''},\n\n''',
''' ''' * 16 + '''step=epoch,\n''',
''' ''' * 12,
''' ''' * 8 + '''for step, batch in enumerate(active_dataloader):\n''',
]
self.one_complete_example("""complete_cv_example.py""" , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.one_complete_example("""complete_cv_example.py""" , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
@mock.patch.dict(os.environ , {"""TESTING_MOCKED_DATALOADERS""": """1"""} )
class SCREAMING_SNAKE_CASE_ ( UpperCAmelCase_ ):
__lowerCAmelCase = False
@classmethod
def lowerCamelCase_ ( cls : Any ):
"""simple docstring"""
super().setUpClass()
UpperCamelCase = tempfile.mkdtemp()
UpperCamelCase = os.path.join(cls._tmpdir , """default_config.yml""" )
write_basic_config(save_location=cls.configPath )
UpperCamelCase = ['''accelerate''', '''launch''', '''--config_file''', cls.configPath]
@classmethod
def lowerCamelCase_ ( cls : Any ):
"""simple docstring"""
super().tearDownClass()
shutil.rmtree(cls._tmpdir )
def lowerCamelCase_ ( self : Tuple ):
"""simple docstring"""
UpperCamelCase = f"""
examples/by_feature/checkpointing.py
--checkpointing_steps epoch
--output_dir {self.tmpdir}
""".split()
run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(self.tmpdir , """epoch_0""" ) ) )
def lowerCamelCase_ ( self : Union[str, Any] ):
"""simple docstring"""
UpperCamelCase = f"""
examples/by_feature/checkpointing.py
--checkpointing_steps 1
--output_dir {self.tmpdir}
""".split()
UpperCamelCase = run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(self.tmpdir , """step_2""" ) ) )
def lowerCamelCase_ ( self : int ):
"""simple docstring"""
UpperCamelCase = f"""
examples/by_feature/checkpointing.py
--resume_from_checkpoint {os.path.join(self.tmpdir , "epoch_0" )}
""".split()
UpperCamelCase = run_command(self._launch_args + testargs , return_stdout=SCREAMING_SNAKE_CASE_ )
self.assertNotIn("""epoch 0:""" , SCREAMING_SNAKE_CASE_ )
self.assertIn("""epoch 1:""" , SCREAMING_SNAKE_CASE_ )
def lowerCamelCase_ ( self : Tuple ):
"""simple docstring"""
UpperCamelCase = f"""
examples/by_feature/checkpointing.py
--resume_from_checkpoint {os.path.join(self.tmpdir , "step_2" )}
""".split()
UpperCamelCase = run_command(self._launch_args + testargs , return_stdout=SCREAMING_SNAKE_CASE_ )
if torch.cuda.is_available():
UpperCamelCase = torch.cuda.device_count()
else:
UpperCamelCase = 1
if num_processes > 1:
self.assertNotIn("""epoch 0:""" , SCREAMING_SNAKE_CASE_ )
self.assertIn("""epoch 1:""" , SCREAMING_SNAKE_CASE_ )
else:
self.assertIn("""epoch 0:""" , SCREAMING_SNAKE_CASE_ )
self.assertIn("""epoch 1:""" , SCREAMING_SNAKE_CASE_ )
@slow
def lowerCamelCase_ ( self : Dict ):
"""simple docstring"""
UpperCamelCase = '''
examples/by_feature/cross_validation.py
--num_folds 2
'''.split()
with mock.patch.dict(os.environ , {"""TESTING_MOCKED_DATALOADERS""": """0"""} ):
UpperCamelCase = run_command(self._launch_args + testargs , return_stdout=SCREAMING_SNAKE_CASE_ )
UpperCamelCase = re.findall("""({.+})""" , SCREAMING_SNAKE_CASE_ )
UpperCamelCase = [r for r in results if '''accuracy''' in r][-1]
UpperCamelCase = ast.literal_eval(SCREAMING_SNAKE_CASE_ )
self.assertGreaterEqual(results["""accuracy"""] , 0.7_5 )
def lowerCamelCase_ ( self : List[str] ):
"""simple docstring"""
UpperCamelCase = ['''examples/by_feature/multi_process_metrics.py''']
run_command(self._launch_args + testargs )
@require_trackers
@mock.patch.dict(os.environ , {"""WANDB_MODE""": """offline"""} )
def lowerCamelCase_ ( self : Optional[int] ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdir:
UpperCamelCase = f"""
examples/by_feature/tracking.py
--with_tracking
--project_dir {tmpdir}
""".split()
run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(SCREAMING_SNAKE_CASE_ , """tracking""" ) ) )
def lowerCamelCase_ ( self : Optional[int] ):
"""simple docstring"""
UpperCamelCase = ['''examples/by_feature/gradient_accumulation.py''']
run_command(self._launch_args + testargs )
def lowerCamelCase_ ( self : List[Any] ):
"""simple docstring"""
UpperCamelCase = ['''examples/by_feature/local_sgd.py''']
run_command(self._launch_args + testargs )
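# --- illustrative aside ---
# The harness pattern these tests rely on, sketched with assumed paths:
# write a basic accelerate config once, then prefix every example script
# invocation with `accelerate launch --config_file <path>`.
def _demo_launch_pattern(tmpdir):
    import os
    from accelerate.utils import write_basic_config

    config_path = os.path.join(tmpdir, "default_config.yml")
    write_basic_config(save_location=config_path)
    launch_args = ["accelerate", "launch", "--config_file", config_path]
    test_args = ["examples/by_feature/checkpointing.py", "--checkpointing_steps", "epoch"]
    return launch_args + test_args  # pass the combined list to run_command(...)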
| 343 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {
'''YituTech/conv-bert-base''': '''https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json''',
'''YituTech/conv-bert-medium-small''': (
'''https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json'''
),
'''YituTech/conv-bert-small''': '''https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json''',
# See all ConvBERT models at https://huggingface.co/models?filter=convbert
}
class lowerCAmelCase_ ( UpperCAmelCase_ ):
'''simple docstring'''
UpperCamelCase_ : Optional[Any] = """convbert"""
def __init__( self : Dict , SCREAMING_SNAKE_CASE_ : Dict=3_05_22 , SCREAMING_SNAKE_CASE_ : int=7_68 , SCREAMING_SNAKE_CASE_ : List[str]=12 , SCREAMING_SNAKE_CASE_ : List[str]=12 , SCREAMING_SNAKE_CASE_ : Dict=30_72 , SCREAMING_SNAKE_CASE_ : Optional[int]="gelu" , SCREAMING_SNAKE_CASE_ : List[Any]=0.1 , SCREAMING_SNAKE_CASE_ : int=0.1 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=5_12 , SCREAMING_SNAKE_CASE_ : List[Any]=2 , SCREAMING_SNAKE_CASE_ : List[str]=0.02 , SCREAMING_SNAKE_CASE_ : int=1E-12 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=1 , SCREAMING_SNAKE_CASE_ : int=0 , SCREAMING_SNAKE_CASE_ : str=2 , SCREAMING_SNAKE_CASE_ : List[Any]=7_68 , SCREAMING_SNAKE_CASE_ : Optional[Any]=2 , SCREAMING_SNAKE_CASE_ : Any=9 , SCREAMING_SNAKE_CASE_ : Tuple=1 , SCREAMING_SNAKE_CASE_ : List[Any]=None , **SCREAMING_SNAKE_CASE_ : List[str] , ) -> List[Any]:
'''simple docstring'''
super().__init__(
pad_token_id=SCREAMING_SNAKE_CASE_ , bos_token_id=SCREAMING_SNAKE_CASE_ , eos_token_id=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
A: Dict = vocab_size
A: Tuple = hidden_size
A: Optional[int] = num_hidden_layers
A: List[str] = num_attention_heads
A: int = intermediate_size
A: int = hidden_act
A: List[str] = hidden_dropout_prob
A: int = attention_probs_dropout_prob
A: Tuple = max_position_embeddings
A: Any = type_vocab_size
A: str = initializer_range
A: Union[str, Any] = layer_norm_eps
A: str = embedding_size
A: Optional[int] = head_ratio
A: List[Any] = conv_kernel_size
A: List[Any] = num_groups
A: Optional[int] = classifier_dropout
class lowerCAmelCase_ ( UpperCAmelCase_ ):
'''simple docstring'''
@property
def _snake_case ( self : Optional[Any] ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task == "multiple-choice":
A: Tuple = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
A: List[str] = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
('''token_type_ids''', dynamic_axis),
] )
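# --- illustrative aside ---
# What the OnnxConfig above yields in the default (non multiple-choice)
# branch: per-input dynamic-axis maps in the shape torch.onnx.export
# expects. The export call itself is only sketched, with assumed tensors.
def _demo_dynamic_axes():
    axes = {0: "batch", 1: "sequence"}
    return {
        "input_ids": dict(axes),
        "attention_mask": dict(axes),
        "token_type_ids": dict(axes),
    }

# torch.onnx.export(model, (input_ids, attention_mask, token_type_ids),
#                   "convbert.onnx", dynamic_axes=_demo_dynamic_axes())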
| 319 | 0 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.activations import gelu_new, gelu_python, get_activation
@require_torch
class __lowercase ( unittest.TestCase ):
"""simple docstring"""
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]):
SCREAMING_SNAKE_CASE_: Any = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
SCREAMING_SNAKE_CASE_: Dict = get_activation("gelu")
self.assertTrue(torch.allclose(gelu_python(SCREAMING_SNAKE_CASE_) , torch_builtin(SCREAMING_SNAKE_CASE_)))
self.assertFalse(torch.allclose(gelu_python(SCREAMING_SNAKE_CASE_) , gelu_new(SCREAMING_SNAKE_CASE_)))
def _SCREAMING_SNAKE_CASE ( self : Any):
SCREAMING_SNAKE_CASE_: Optional[Any] = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
SCREAMING_SNAKE_CASE_: List[Any] = get_activation("gelu")
SCREAMING_SNAKE_CASE_: Optional[int] = get_activation("gelu_10")
SCREAMING_SNAKE_CASE_: Any = torch_builtin(SCREAMING_SNAKE_CASE_)
SCREAMING_SNAKE_CASE_: Optional[int] = geluaa(SCREAMING_SNAKE_CASE_)
SCREAMING_SNAKE_CASE_: Union[str, Any] = torch.where(y_gelu_aa < 10.0 , 1 , 0)
self.assertTrue(torch.max(SCREAMING_SNAKE_CASE_).item() == 10.0)
self.assertTrue(torch.allclose(y_gelu * clipped_mask , y_gelu_aa * clipped_mask))
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]):
get_activation("gelu")
get_activation("gelu_10")
get_activation("gelu_fast")
get_activation("gelu_new")
get_activation("gelu_python")
get_activation("gelu_pytorch_tanh")
get_activation("linear")
get_activation("mish")
get_activation("quick_gelu")
get_activation("relu")
get_activation("sigmoid")
get_activation("silu")
get_activation("swish")
get_activation("tanh")
with self.assertRaises(SCREAMING_SNAKE_CASE_):
get_activation("bogus")
with self.assertRaises(SCREAMING_SNAKE_CASE_):
get_activation(SCREAMING_SNAKE_CASE_)
def _SCREAMING_SNAKE_CASE ( self : Tuple):
SCREAMING_SNAKE_CASE_: int = get_activation("gelu")
SCREAMING_SNAKE_CASE_: Optional[Any] = 1
SCREAMING_SNAKE_CASE_: List[Any] = get_activation("gelu")
self.assertEqual(acta.a , 1)
with self.assertRaises(SCREAMING_SNAKE_CASE_):
SCREAMING_SNAKE_CASE_: Tuple = acta.a
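# --- illustrative aside ---
# For reference, the exact-erf GELU these tests compare against:
# x * Phi(x) with the Gaussian CDF written through erf, which matches
# torch.nn.functional.gelu's default behaviour up to float error.
def _demo_gelu_exact():
    import math
    import torch

    def gelu_exact(x):
        return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))

    x = torch.tensor([-1.0, 0.0, 1.0])
    assert torch.allclose(gelu_exact(x), torch.nn.functional.gelu(x), atol=1e-6)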
| 13 |
'''simple docstring'''
from __future__ import annotations
def SCREAMING_SNAKE_CASE( __lowercase ) -> bool:
if len(__lowercase ) < 2:
raise ValueError('''Monogons and Digons are not polygons in the Euclidean space''' )
if any(i <= 0 for i in nums ):
raise ValueError('''All values must be greater than 0''' )
A: Any = nums.copy()
copy_nums.sort()
return copy_nums[-1] < sum(copy_nums[:-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
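# --- illustrative aside ---
# A standalone restatement of the check above (the dump obfuscates the
# function name): a polygon is constructible exactly when its longest side
# is strictly shorter than the sum of the remaining sides.
def _demo_check_polygon(nums):
    sides = sorted(nums)
    return sides[-1] < sum(sides[:-1])

# _demo_check_polygon([6, 10, 5])  -> True   (10 < 6 + 5)
# _demo_check_polygon([3, 7, 13])  -> False  (13 >= 3 + 7)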
| 319 | 0 |
"""simple docstring"""
from __future__ import annotations
import matplotlib.pyplot as plt # type: ignore
import numpy
# initial triangle of Koch snowflake
__UpperCamelCase = numpy.array([0, 0])
__UpperCamelCase = numpy.array([0.5, 0.8660254])
__UpperCamelCase = numpy.array([1, 0])
__UpperCamelCase = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> list[numpy.ndarray]:
snake_case_ = initial_vectors
for _ in range(__lowercase ):
snake_case_ = iteration_step(__lowercase )
return vectors
def UpperCAmelCase ( UpperCAmelCase ) -> list[numpy.ndarray]:
snake_case_ = []
for i, start_vector in enumerate(vectors[:-1] ):
snake_case_ = vectors[i + 1]
        new_vectors.append(start_vector )
snake_case_ = end_vector - start_vector
new_vectors.append(start_vector + difference_vector / 3 )
new_vectors.append(
start_vector + difference_vector / 3 + rotate(difference_vector / 3 , 60 ) )
new_vectors.append(start_vector + difference_vector * 2 / 3 )
new_vectors.append(vectors[-1] )
return new_vectors
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> numpy.ndarray:
snake_case_ = numpy.radians(__lowercase )
    c , s = numpy.cos(__lowercase ), numpy.sin(__lowercase )
snake_case_ = numpy.array(((c, -s), (s, c)) )
return numpy.dot(__lowercase , __lowercase )
def UpperCAmelCase ( UpperCAmelCase ) -> None:
snake_case_ = plt.gca()
axes.set_aspect('equal' )
# matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
# y-coordinates as inputs, which are constructed from the vector-list using
# zip()
snake_case_ = zip(*__lowercase )
plt.plot(__lowercase , __lowercase )
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
__UpperCamelCase = iterate(INITIAL_VECTORS, 5)
plot(processed_vectors)
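# --- illustrative aside ---
# A quick sanity check of the rotation helper above: rotating the unit
# x-vector by 90 degrees yields the unit y-vector (up to float error).
# Each iteration also turns every segment into four, so after n steps the
# initial 4 points grow to 3 * 4**n + 1 points.
def _demo_rotate_90():
    theta = numpy.radians(90)
    c, s = numpy.cos(theta), numpy.sin(theta)
    rotated = numpy.dot(numpy.array(((c, -s), (s, c))), numpy.array([1.0, 0.0]))
    assert numpy.allclose(rotated, [0.0, 1.0])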
| 69 |
'''simple docstring'''
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
UpperCamelCase = abspath(join(dirname(__file__), '''src'''))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='''ignore''', category=FutureWarning)
def SCREAMING_SNAKE_CASE( __lowercase ) -> Optional[Any]:
config.addinivalue_line(
'''markers''' , '''is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested''' )
config.addinivalue_line(
'''markers''' , '''is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested''' )
config.addinivalue_line('''markers''' , '''is_pipeline_test: mark test to run only when pipelines are tested''' )
config.addinivalue_line('''markers''' , '''is_staging_test: mark test to run only in the staging environment''' )
config.addinivalue_line('''markers''' , '''accelerate_tests: mark test that require accelerate''' )
config.addinivalue_line('''markers''' , '''tool_tests: mark the tool tests that are run on their specific schedule''' )
def SCREAMING_SNAKE_CASE( __lowercase ) -> Optional[int]:
from transformers.testing_utils import pytest_addoption_shared
pytest_addoption_shared(__lowercase )
def SCREAMING_SNAKE_CASE( __lowercase ) -> Tuple:
from transformers.testing_utils import pytest_terminal_summary_main
A: Optional[int] = terminalreporter.config.getoption('''--make-reports''' )
if make_reports:
pytest_terminal_summary_main(__lowercase , id=__lowercase )
def SCREAMING_SNAKE_CASE( __lowercase , __lowercase ) -> Any:
# If no tests are collected, pytest exists with code 5, which makes the CI fail.
if exitstatus == 5:
A: Tuple = 0
# Doctest custom flag to ignore output.
UpperCamelCase = doctest.register_optionflag('''IGNORE_RESULT''')
UpperCamelCase = doctest.OutputChecker
class lowerCAmelCase_ ( UpperCAmelCase_ ):
'''simple docstring'''
def _snake_case ( self : int , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int ) -> str:
'''simple docstring'''
if IGNORE_RESULT & optionflags:
return True
return OutputChecker.check_output(self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
UpperCamelCase = CustomOutputChecker
UpperCamelCase = HfDoctestModule
UpperCamelCase = HfDocTestParser
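# --- illustrative aside ---
# How the custom IGNORE_RESULT doctest flag registered above is typically
# used: mark a line whose printed value should not be compared, e.g.
#
#     >>> import random
#     >>> random.random()  # doctest: +IGNORE_RESULT
#     0.123
#
# With the CustomOutputChecker installed, check_output() short-circuits to
# True for such lines, so any recorded output is accepted.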
| 319 | 0 |
'''simple docstring'''
from __future__ import annotations
import numpy as np
from numpy import floataa
from numpy.typing import NDArray
def __lowerCamelCase ( A__ , A__ , A__ , A__ , ) -> list[float]:
"""simple docstring"""
UpperCamelCase = coefficient_matrix.shape
UpperCamelCase = constant_matrix.shape
if rowsa != colsa:
UpperCamelCase = F"""Coefficient matrix dimensions must be nxn but received {rowsa}x{colsa}"""
raise ValueError(__lowercase )
if colsa != 1:
UpperCamelCase = F"""Constant matrix must be nx1 but received {rowsa}x{colsa}"""
raise ValueError(__lowercase )
if rowsa != rowsa:
UpperCamelCase = (
'''Coefficient and constant matrices dimensions must be nxn and nx1 but '''
F"""received {rowsa}x{colsa} and {rowsa}x{colsa}"""
)
raise ValueError(__lowercase )
if len(__lowercase ) != rowsa:
UpperCamelCase = (
'''Number of initial values must be equal to number of rows in coefficient '''
F"""matrix but received {len(__lowercase )} and {rowsa}"""
)
raise ValueError(__lowercase )
if iterations <= 0:
raise ValueError('Iterations must be at least 1' )
UpperCamelCase = np.concatenate(
(coefficient_matrix, constant_matrix) , axis=1 )
UpperCamelCase = table.shape
strictly_diagonally_dominant(__lowercase )
# Iterates the whole matrix for given number of times
for _ in range(__lowercase ):
UpperCamelCase = []
for row in range(__lowercase ):
UpperCamelCase = 0
for col in range(__lowercase ):
if col == row:
UpperCamelCase = table[row][col]
elif col == cols - 1:
UpperCamelCase = table[row][col]
else:
temp += (-1) * table[row][col] * init_val[col]
UpperCamelCase = (temp + val) / denom
new_val.append(__lowercase )
UpperCamelCase = new_val
return [float(__lowercase ) for i in new_val]
def __lowerCamelCase ( A__ ) -> bool:
"""simple docstring"""
UpperCamelCase = table.shape
UpperCamelCase = True
for i in range(0 , __lowercase ):
UpperCamelCase = 0
for j in range(0 , cols - 1 ):
if i == j:
continue
else:
total += table[i][j]
if table[i][i] <= total:
raise ValueError('Coefficient matrix is not strictly diagonally dominant' )
return is_diagonally_dominant
# Test Cases
if __name__ == "__main__":
import doctest
doctest.testmod()
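# --- illustrative aside ---
# Worked usage on a strictly diagonally dominant 2x2 system
# (4x + y = 2, x + 3y = -6), whose exact solution is x = 12/11, y = -26/11.
# The solver is called by its canonical name below; in this dump the
# function itself is obfuscated to __lowerCamelCase.
def _demo_jacobi_inputs():
    coefficients = np.array([[4.0, 1.0], [1.0, 3.0]])
    constants = np.array([[2.0], [-6.0]])
    # jacobi_iteration_method(coefficients, constants, [0.5, -0.5], iterations=50)
    # -> approximately [1.0909, -2.3636]
    return coefficients, constants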
| 28 |
'''simple docstring'''
import heapq
import sys
import numpy as np
UpperCamelCase = tuple[int, int]
class lowerCAmelCase_ :
'''simple docstring'''
def __init__( self : List[Any] ) -> str:
'''simple docstring'''
A: Any = []
A: int = set()
def _snake_case ( self : Optional[Any] ) -> int:
'''simple docstring'''
if not self.empty():
return self.elements[0][0]
else:
return float('''inf''' )
def _snake_case ( self : List[str] ) -> List[Any]:
'''simple docstring'''
return len(self.elements ) == 0
def _snake_case ( self : Optional[int] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Any ) -> List[Any]:
'''simple docstring'''
if item not in self.set:
heapq.heappush(self.elements , (priority, item) )
self.set.add(SCREAMING_SNAKE_CASE_ )
else:
# update
# print("update", item)
A: Optional[int] = []
((A) , (A)): str = heapq.heappop(self.elements )
while x != item:
temp.append((pri, x) )
((A) , (A)): int = heapq.heappop(self.elements )
temp.append((priority, item) )
for pro, xxx in temp:
heapq.heappush(self.elements , (pro, xxx) )
def _snake_case ( self : Optional[int] , SCREAMING_SNAKE_CASE_ : str ) -> Any:
'''simple docstring'''
if item in self.set:
self.set.remove(SCREAMING_SNAKE_CASE_ )
A: str = []
((A) , (A)): List[str] = heapq.heappop(self.elements )
while x != item:
temp.append((pro, x) )
((A) , (A)): Any = heapq.heappop(self.elements )
for prito, yyy in temp:
heapq.heappush(self.elements , (prito, yyy) )
def _snake_case ( self : List[Any] ) -> Optional[int]:
'''simple docstring'''
return self.elements[0][1]
def _snake_case ( self : int ) -> Union[str, Any]:
'''simple docstring'''
((A) , (A)): Dict = heapq.heappop(self.elements )
self.set.remove(SCREAMING_SNAKE_CASE_ )
return (priority, item)
def SCREAMING_SNAKE_CASE( __lowercase , __lowercase ) -> Union[str, Any]:
# euclidean distance
A: List[str] = np.array(__lowercase )
A: Optional[int] = np.array(__lowercase )
return np.linalg.norm(a - b )
def SCREAMING_SNAKE_CASE( __lowercase , __lowercase ) -> int:
# integer division by time variable
return consistent_heuristic(__lowercase , __lowercase ) // t
def SCREAMING_SNAKE_CASE( __lowercase , __lowercase ) -> Optional[Any]:
# manhattan distance
return abs(p[0] - goal[0] ) + abs(p[1] - goal[1] )
def SCREAMING_SNAKE_CASE( __lowercase , __lowercase , __lowercase , __lowercase ) -> List[Any]:
A: int = g_function[start] + Wa * heuristics[i](__lowercase , __lowercase )
return ans
def SCREAMING_SNAKE_CASE( __lowercase , __lowercase , __lowercase ) -> Optional[int]:
A: Union[str, Any] = np.chararray((n, n) )
for i in range(__lowercase ):
for j in range(__lowercase ):
A: Union[str, Any] = '''*'''
for i in range(__lowercase ):
for j in range(__lowercase ):
if (j, (n - 1) - i) in blocks:
A: Optional[Any] = '''#'''
A: Tuple = '''-'''
A: List[str] = back_pointer[goal]
while x != start:
((A) , (A)): Tuple = x
# print(x)
A: List[str] = '''-'''
A: str = back_pointer[x]
A: Dict = '''-'''
for i in range(__lowercase ):
for j in range(__lowercase ):
if (i, j) == (0, n - 1):
print(grid[i][j] , end=''' ''' )
print('''<-- End position''' , end=''' ''' )
else:
print(grid[i][j] , end=''' ''' )
print()
print('''^''' )
print('''Start position''' )
print()
print('''# is an obstacle''' )
print('''- is the path taken by algorithm''' )
print('''PATH TAKEN BY THE ALGORITHM IS:-''' )
A: List[str] = back_pointer[goal]
while x != start:
print(__lowercase , end=''' ''' )
A: Optional[int] = back_pointer[x]
print(__lowercase )
sys.exit()
def SCREAMING_SNAKE_CASE( __lowercase ) -> Optional[Any]:
if p[0] < 0 or p[0] > n - 1:
return False
if p[1] < 0 or p[1] > n - 1:
return False
return True
def SCREAMING_SNAKE_CASE( __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , ) -> Union[str, Any]:
for itera in range(__lowercase ):
open_list[itera].remove_element(__lowercase )
# print("s", s)
# print("j", j)
((A) , (A)): Tuple = s
A: Optional[Any] = (x - 1, y)
A: str = (x + 1, y)
A: List[Any] = (x, y + 1)
A: int = (x, y - 1)
for neighbours in [left, right, up, down]:
if neighbours not in blocks:
if valid(__lowercase ) and neighbours not in visited:
# print("neighbour", neighbours)
visited.add(__lowercase )
A: int = -1
A: int = float('''inf''' )
if valid(__lowercase ) and g_function[neighbours] > g_function[s] + 1:
A: List[str] = g_function[s] + 1
A: List[str] = s
if neighbours not in close_list_anchor:
open_list[0].put(__lowercase , key(__lowercase , 0 , __lowercase , __lowercase ) )
if neighbours not in close_list_inad:
for var in range(1 , __lowercase ):
if key(__lowercase , __lowercase , __lowercase , __lowercase ) <= Wa * key(
__lowercase , 0 , __lowercase , __lowercase ):
open_list[j].put(
__lowercase , key(__lowercase , __lowercase , __lowercase , __lowercase ) )
def SCREAMING_SNAKE_CASE( ) -> Tuple:
A: str = []
for x in range(1 , 5 ):
for y in range(1 , 6 ):
some_list.append((x, y) )
for x in range(1_5 , 2_0 ):
some_list.append((x, 1_7) )
for x in range(1_0 , 1_9 ):
for y in range(1 , 1_5 ):
some_list.append((x, y) )
# L block
for x in range(1 , 4 ):
for y in range(1_2 , 1_9 ):
some_list.append((x, y) )
for x in range(3 , 1_3 ):
for y in range(1_6 , 1_9 ):
some_list.append((x, y) )
return some_list
UpperCamelCase = {0: consistent_heuristic, 1: heuristic_a, 2: heuristic_a}
UpperCamelCase = [
(0, 1),
(1, 1),
(2, 1),
(3, 1),
(4, 1),
(5, 1),
(6, 1),
(7, 1),
(8, 1),
(9, 1),
(10, 1),
(11, 1),
(12, 1),
(13, 1),
(14, 1),
(15, 1),
(16, 1),
(17, 1),
(18, 1),
(19, 1),
]
UpperCamelCase = make_common_ground()
UpperCamelCase = blocks_blk
# hyper parameters
UpperCamelCase = 1
UpperCamelCase = 1
UpperCamelCase = 20
UpperCamelCase = 3 # one consistent and two other inconsistent
# start and end destination
UpperCamelCase = (0, 0)
UpperCamelCase = (n - 1, n - 1)
UpperCamelCase = 1
def SCREAMING_SNAKE_CASE( __lowercase , __lowercase , __lowercase ) -> int:
A: int = {start: 0, goal: float('''inf''' )}
A: Union[str, Any] = {start: -1, goal: -1}
A: List[Any] = []
A: Union[str, Any] = set()
for i in range(__lowercase ):
open_list.append(PriorityQueue() )
open_list[i].put(__lowercase , key(__lowercase , __lowercase , __lowercase , __lowercase ) )
A: list[int] = []
A: list[int] = []
while open_list[0].minkey() < float('''inf''' ):
for i in range(1 , __lowercase ):
# print(open_list[0].minkey(), open_list[i].minkey())
if open_list[i].minkey() <= Wa * open_list[0].minkey():
global t
t += 1
if g_function[goal] <= open_list[i].minkey():
if g_function[goal] < float('''inf''' ):
do_something(__lowercase , __lowercase , __lowercase )
else:
A , A: Union[str, Any] = open_list[i].top_show()
visited.add(__lowercase )
expand_state(
__lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , )
close_list_inad.append(__lowercase )
else:
if g_function[goal] <= open_list[0].minkey():
if g_function[goal] < float('''inf''' ):
do_something(__lowercase , __lowercase , __lowercase )
else:
A: Union[str, Any] = open_list[0].top_show()
visited.add(__lowercase )
expand_state(
__lowercase , 0 , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , )
close_list_anchor.append(__lowercase )
print('''No path found to goal''' )
print()
for i in range(n - 1 , -1 , -1 ):
for j in range(__lowercase ):
if (j, i) in blocks:
print('''#''' , end=''' ''' )
elif (j, i) in back_pointer:
if (j, i) == (n - 1, n - 1):
print('''*''' , end=''' ''' )
else:
print('''-''' , end=''' ''' )
else:
print('''*''' , end=''' ''' )
if (j, i) == (n - 1, n - 1):
print('''<-- End position''' , end=''' ''' )
print()
print('''^''' )
print('''Start position''' )
print()
print('''# is an obstacle''' )
print('''- is the path taken by algorithm''' )
if __name__ == "__main__":
multi_a_star(start, goal, n_heuristic)
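# --- illustrative aside ---
# The priority used throughout the search above is the weighted A* key
# key(s, i) = g(s) + W1 * h_i(s); the anchor (consistent) heuristic sits at
# i = 0 and an inadmissible queue i is only expanded while its minimum key
# stays within W2 times the anchor's. A tiny numeric check with the
# manhattan heuristic toward the goal (19, 19):
def _demo_weighted_key():
    g_value, W1 = 3, 1               # weight matches this module's setting
    h = abs(0 - 19) + abs(0 - 19)    # manhattan distance from (0, 0)
    assert g_value + W1 * h == 41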
| 319 | 0 |
from __future__ import annotations
import csv
import requests
from bs4 import BeautifulSoup
def get_imdb_top_aaa_movies( lowerCamelCase = "" ):
lowerCamelCase : Tuple = url or '''https://www.imdb.com/chart/top/?ref_=nv_mv_250'''
lowerCamelCase : List[Any] = BeautifulSoup(requests.get(__lowercase ).text, """html.parser""" )
lowerCamelCase : int = soup.find_all("""td""", attrs="""titleColumn""" )
lowerCamelCase : List[str] = soup.find_all("""td""", class_="""ratingColumn imdbRating""" )
return {
title.a.text: float(rating.strong.text )
for title, rating in zip(__lowercase, __lowercase )
}
def write_movies( lowerCamelCase = "IMDb_Top_250_Movies.csv" ):
lowerCamelCase : Dict = get_imdb_top_aaa_movies()
with open(__lowercase, """w""", newline="""""" ) as out_file:
lowerCamelCase : Dict = csv.writer(__lowercase )
writer.writerow(["""Movie title""", """IMDb rating"""] )
for title, rating in movies.items():
writer.writerow([title, rating] )
if __name__ == "__main__":
write_movies()
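# --- illustrative aside ---
# Hedged usage sketch (requires network access, and IMDb's markup may have
# changed since this scraper was written):
#
#     movies = get_imdb_top_aaa_movies()
#     print(list(movies.items())[:3])
#     write_movies("IMDb_Top_250_Movies.csv")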
| 287 |
'''simple docstring'''
def SCREAMING_SNAKE_CASE( __lowercase = 1 , __lowercase = 1_0_0_0 ) -> int:
A: Any = 1
A: Optional[Any] = 0
for divide_by_number in range(__lowercase , digit + 1 ):
A: list[int] = []
A: List[Any] = numerator
for _ in range(1 , digit + 1 ):
if now_divide in has_been_divided:
if longest_list_length < len(__lowercase ):
A: Any = len(__lowercase )
A: Dict = divide_by_number
else:
has_been_divided.append(__lowercase )
A: str = now_divide * 1_0 % divide_by_number
return the_digit
# Tests
if __name__ == "__main__":
import doctest
doctest.testmod()
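# --- illustrative aside ---
# The routine above searches for the denominator whose unit fraction 1/d
# has the longest recurring decimal cycle (Project Euler 26). The cycle
# length equals the count of distinct long-division remainders before one
# repeats; a worked check for d = 7:
def _demo_cycle_length_of_one_seventh():
    remainders, now = [], 1
    while now not in remainders:
        remainders.append(now)
        now = now * 10 % 7
    assert len(remainders) == 6   # 1/7 = 0.(142857), a six-digit cycle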
| 319 | 0 |
import numpy as np
from transformers import BatchFeature
from transformers.testing_utils import require_tf, require_torch
from .test_feature_extraction_common import FeatureExtractionSavingTestMixin
class lowerCAmelCase ( UpperCAmelCase_ ):
UpperCAmelCase__ = None
UpperCAmelCase__ = None
@property
def A_ ( self : Tuple ) -> str:
return self.feat_extract_tester.prepare_feat_extract_dict()
def A_ ( self : str ) -> Optional[Any]:
lowerCamelCase__ : List[Any] = self.feature_extraction_class(**self.feat_extract_dict )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , 'feature_size' ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , 'sampling_rate' ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , 'padding_value' ) )
def A_ ( self : List[str] ) -> Any:
lowerCamelCase__ : List[str] = self.feat_extract_tester.prepare_inputs_for_common()
lowerCamelCase__ : List[Any] = self.feature_extraction_class(**self.feat_extract_dict )
lowerCamelCase__ : Optional[int] = feat_extract.model_input_names[0]
lowerCamelCase__ : str = BatchFeature({input_name: speech_inputs} )
self.assertTrue(all(len(SCREAMING_SNAKE_CASE_ ) == len(SCREAMING_SNAKE_CASE_ ) for x, y in zip(SCREAMING_SNAKE_CASE_ , processed_features[input_name] ) ) )
lowerCamelCase__ : List[str] = self.feat_extract_tester.prepare_inputs_for_common(equal_length=SCREAMING_SNAKE_CASE_ )
lowerCamelCase__ : Union[str, Any] = BatchFeature({input_name: speech_inputs} , tensor_type='np' )
lowerCamelCase__ : Any = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
lowerCamelCase__ : Dict = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
@require_torch
def A_ ( self : List[Any] ) -> Optional[Any]:
lowerCamelCase__ : List[Any] = self.feat_extract_tester.prepare_inputs_for_common(equal_length=SCREAMING_SNAKE_CASE_ )
lowerCamelCase__ : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_dict )
lowerCamelCase__ : List[str] = feat_extract.model_input_names[0]
lowerCamelCase__ : Optional[int] = BatchFeature({input_name: speech_inputs} , tensor_type='pt' )
lowerCamelCase__ : Union[str, Any] = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
lowerCamelCase__ : Any = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
@require_tf
def A_ ( self : Union[str, Any] ) -> List[Any]:
lowerCamelCase__ : str = self.feat_extract_tester.prepare_inputs_for_common(equal_length=SCREAMING_SNAKE_CASE_ )
lowerCamelCase__ : List[str] = self.feature_extraction_class(**self.feat_extract_dict )
lowerCamelCase__ : Union[str, Any] = feat_extract.model_input_names[0]
lowerCamelCase__ : Dict = BatchFeature({input_name: speech_inputs} , tensor_type='tf' )
lowerCamelCase__ : Optional[int] = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
lowerCamelCase__ : Optional[Any] = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
def A_ ( self : Any , UpperCAmelCase : List[Any]=False ) -> Dict:
def _inputs_have_equal_length(UpperCAmelCase : Optional[int] ):
lowerCamelCase__ : Tuple = len(input[0] )
for input_slice in input[1:]:
if len(SCREAMING_SNAKE_CASE_ ) != length:
return False
return True
def _inputs_are_equal(UpperCAmelCase : Dict , UpperCAmelCase : Union[str, Any] ):
if len(SCREAMING_SNAKE_CASE_ ) != len(SCREAMING_SNAKE_CASE_ ):
return False
for input_slice_a, input_slice_a in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
if not np.allclose(np.asarray(SCREAMING_SNAKE_CASE_ ) , np.asarray(SCREAMING_SNAKE_CASE_ ) , atol=1e-3 ):
return False
return True
lowerCamelCase__ : int = self.feature_extraction_class(**self.feat_extract_dict )
lowerCamelCase__ : int = self.feat_extract_tester.prepare_inputs_for_common(numpify=SCREAMING_SNAKE_CASE_ )
lowerCamelCase__ : Dict = feat_extract.model_input_names[0]
lowerCamelCase__ : Optional[int] = BatchFeature({input_name: speech_inputs} )
lowerCamelCase__ : Optional[int] = self.feat_extract_tester.seq_length_diff
lowerCamelCase__ : List[Any] = self.feat_extract_tester.max_seq_length + pad_diff
lowerCamelCase__ : List[str] = self.feat_extract_tester.min_seq_length
lowerCamelCase__ : int = self.feat_extract_tester.batch_size
lowerCamelCase__ : List[Any] = self.feat_extract_tester.feature_size
# test padding for List[int] + numpy
lowerCamelCase__ : int = feat_extract.pad(SCREAMING_SNAKE_CASE_ , padding=SCREAMING_SNAKE_CASE_ )
lowerCamelCase__ : Optional[Any] = input_a[input_name]
lowerCamelCase__ : int = feat_extract.pad(SCREAMING_SNAKE_CASE_ , padding='longest' )
lowerCamelCase__ : Tuple = input_a[input_name]
lowerCamelCase__ : Dict = feat_extract.pad(SCREAMING_SNAKE_CASE_ , padding='max_length' , max_length=len(speech_inputs[-1] ) )
lowerCamelCase__ : Tuple = input_a[input_name]
lowerCamelCase__ : Union[str, Any] = feat_extract.pad(SCREAMING_SNAKE_CASE_ , padding='longest' , return_tensors='np' )
lowerCamelCase__ : Optional[Any] = input_a[input_name]
# max_length parameter has to be provided when setting `padding="max_length"`
with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
feat_extract.pad(SCREAMING_SNAKE_CASE_ , padding='max_length' )[input_name]
lowerCamelCase__ : Tuple = feat_extract.pad(
SCREAMING_SNAKE_CASE_ , padding='max_length' , max_length=SCREAMING_SNAKE_CASE_ , return_tensors='np' )
lowerCamelCase__ : Optional[Any] = input_a[input_name]
self.assertFalse(_inputs_have_equal_length(SCREAMING_SNAKE_CASE_ ) )
self.assertTrue(_inputs_have_equal_length(SCREAMING_SNAKE_CASE_ ) )
self.assertTrue(_inputs_have_equal_length(SCREAMING_SNAKE_CASE_ ) )
self.assertTrue(_inputs_are_equal(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
self.assertTrue(len(input_a[0] ) == pad_min_length )
self.assertTrue(len(input_a[1] ) == pad_min_length + pad_diff )
self.assertTrue(input_a.shape[:2] == (batch_size, len(input_a[0] )) )
self.assertTrue(input_a.shape[:2] == (batch_size, pad_max_length) )
if feature_size > 1:
self.assertTrue(input_a.shape[2] == input_a.shape[2] == feature_size )
# test padding for `pad_to_multiple_of` for List[int] + numpy
lowerCamelCase__ : Any = feat_extract.pad(SCREAMING_SNAKE_CASE_ , pad_to_multiple_of=10 )
lowerCamelCase__ : Any = input_a[input_name]
lowerCamelCase__ : str = feat_extract.pad(SCREAMING_SNAKE_CASE_ , padding='longest' , pad_to_multiple_of=10 )
lowerCamelCase__ : Optional[Any] = input_a[input_name]
lowerCamelCase__ : int = feat_extract.pad(
SCREAMING_SNAKE_CASE_ , padding='max_length' , pad_to_multiple_of=10 , max_length=SCREAMING_SNAKE_CASE_ )
lowerCamelCase__ : Union[str, Any] = input_a[input_name]
lowerCamelCase__ : List[str] = feat_extract.pad(
SCREAMING_SNAKE_CASE_ , padding='max_length' , pad_to_multiple_of=10 , max_length=SCREAMING_SNAKE_CASE_ , return_tensors='np' , )
lowerCamelCase__ : Union[str, Any] = input_a[input_name]
self.assertTrue(all(len(SCREAMING_SNAKE_CASE_ ) % 10 == 0 for x in input_a ) )
self.assertTrue(_inputs_are_equal(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
lowerCamelCase__ : str = pad_max_length if pad_max_length % 10 == 0 else (pad_max_length // 10 + 1) * 10
self.assertTrue(all(len(SCREAMING_SNAKE_CASE_ ) == expected_mult_pad_length for x in input_a ) )
self.assertEqual(input_a.shape[:2] , (batch_size, expected_mult_pad_length) )
if feature_size > 1:
self.assertTrue(input_a.shape[2] == feature_size )
# Check padding value is correct
lowerCamelCase__ : Optional[Any] = (np.ones(self.feat_extract_tester.feature_size ) * feat_extract.padding_value).sum()
self.assertTrue(
abs(np.asarray(input_a[0] )[pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) )
< 1e-3 )
self.assertTrue(
abs(
np.asarray(input_a[1] )[pad_min_length + pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - pad_diff) )
< 1e-3 )
self.assertTrue(
abs(
np.asarray(input_a[2] )[pad_min_length + 2 * pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - 2 * pad_diff) )
< 1e-3 )
self.assertTrue(
abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) ) < 1e-3 )
self.assertTrue(
abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (expected_mult_pad_length - pad_min_length) )
< 1e-3 )
def A_ ( self : Any , UpperCAmelCase : List[Any]=False ) -> Tuple:
def _inputs_have_equal_length(UpperCAmelCase : str ):
lowerCamelCase__ : List[Any] = len(input[0] )
for input_slice in input[1:]:
if len(SCREAMING_SNAKE_CASE_ ) != length:
return False
return True
def _inputs_are_equal(UpperCAmelCase : str , UpperCAmelCase : Tuple ):
if len(SCREAMING_SNAKE_CASE_ ) != len(SCREAMING_SNAKE_CASE_ ):
return False
for input_slice_a, input_slice_a in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
if not np.allclose(np.asarray(SCREAMING_SNAKE_CASE_ ) , np.asarray(SCREAMING_SNAKE_CASE_ ) , atol=1e-3 ):
return False
return True
lowerCamelCase__ : List[str] = self.feature_extraction_class(**self.feat_extract_dict )
lowerCamelCase__ : int = self.feat_extract_tester.prepare_inputs_for_common(numpify=SCREAMING_SNAKE_CASE_ )
lowerCamelCase__ : List[str] = feat_extract.model_input_names[0]
lowerCamelCase__ : Tuple = BatchFeature({input_name: speech_inputs} )
# truncate to smallest
lowerCamelCase__ : Any = feat_extract.pad(
SCREAMING_SNAKE_CASE_ , padding='max_length' , max_length=len(speech_inputs[0] ) , truncation=SCREAMING_SNAKE_CASE_ )
lowerCamelCase__ : Any = input_a[input_name]
lowerCamelCase__ : Optional[Any] = feat_extract.pad(SCREAMING_SNAKE_CASE_ , padding='max_length' , max_length=len(speech_inputs[0] ) )
lowerCamelCase__ : List[str] = input_a[input_name]
self.assertTrue(_inputs_have_equal_length(SCREAMING_SNAKE_CASE_ ) )
self.assertFalse(_inputs_have_equal_length(SCREAMING_SNAKE_CASE_ ) )
# truncate to smallest with np
lowerCamelCase__ : Any = feat_extract.pad(
SCREAMING_SNAKE_CASE_ , padding='max_length' , max_length=len(speech_inputs[0] ) , return_tensors='np' , truncation=SCREAMING_SNAKE_CASE_ , )
lowerCamelCase__ : Any = input_a[input_name]
lowerCamelCase__ : List[Any] = feat_extract.pad(
SCREAMING_SNAKE_CASE_ , padding='max_length' , max_length=len(speech_inputs[0] ) , return_tensors='np' )
lowerCamelCase__ : Optional[int] = input_a[input_name]
self.assertTrue(_inputs_have_equal_length(SCREAMING_SNAKE_CASE_ ) )
self.assertTrue(input_a.shape[1] == len(speech_inputs[0] ) )
# since truncation forces padding to be smaller than longest input
# function can't return `np.ndarray`, but has to return list
self.assertFalse(_inputs_have_equal_length(SCREAMING_SNAKE_CASE_ ) )
# truncate to middle
lowerCamelCase__ : Optional[int] = feat_extract.pad(
SCREAMING_SNAKE_CASE_ , padding='max_length' , max_length=len(speech_inputs[1] ) , truncation=SCREAMING_SNAKE_CASE_ , return_tensors='np' , )
lowerCamelCase__ : Dict = input_a[input_name]
lowerCamelCase__ : List[Any] = feat_extract.pad(
SCREAMING_SNAKE_CASE_ , padding='max_length' , max_length=len(speech_inputs[1] ) , truncation=SCREAMING_SNAKE_CASE_ )
lowerCamelCase__ : int = input_a[input_name]
lowerCamelCase__ : Dict = feat_extract.pad(
SCREAMING_SNAKE_CASE_ , padding='max_length' , max_length=len(speech_inputs[1] ) , return_tensors='np' )
lowerCamelCase__ : Tuple = input_a[input_name]
self.assertTrue(input_a.shape[1] == len(speech_inputs[1] ) )
self.assertTrue(_inputs_have_equal_length(SCREAMING_SNAKE_CASE_ ) )
self.assertTrue(_inputs_have_equal_length(SCREAMING_SNAKE_CASE_ ) )
self.assertTrue(_inputs_are_equal(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
# since truncation forces padding to be smaller than longest input
# function can't return `np.ndarray`, but has to return list
self.assertFalse(_inputs_have_equal_length(SCREAMING_SNAKE_CASE_ ) )
self.assertTrue(len(input_a[-1] ) == len(speech_inputs[-1] ) )
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
feat_extract.pad(SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_ )[input_name]
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
feat_extract.pad(SCREAMING_SNAKE_CASE_ , padding='longest' , truncation=SCREAMING_SNAKE_CASE_ )[input_name]
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
feat_extract.pad(SCREAMING_SNAKE_CASE_ , padding='longest' , truncation=SCREAMING_SNAKE_CASE_ )[input_name]
# max_length parameter has to be provided when setting `truncation=True` and padding="max_length"
with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
feat_extract.pad(SCREAMING_SNAKE_CASE_ , padding='max_length' , truncation=SCREAMING_SNAKE_CASE_ )[input_name]
# test truncation for `pad_to_multiple_of` for List[int] + numpy
lowerCamelCase__ : List[Any] = 12
lowerCamelCase__ : int = feat_extract.pad(
SCREAMING_SNAKE_CASE_ , padding='max_length' , max_length=len(speech_inputs[0] ) , pad_to_multiple_of=SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_ , )
lowerCamelCase__ : Union[str, Any] = input_a[input_name]
lowerCamelCase__ : int = feat_extract.pad(
SCREAMING_SNAKE_CASE_ , padding='max_length' , max_length=len(speech_inputs[0] ) , pad_to_multiple_of=SCREAMING_SNAKE_CASE_ , )
lowerCamelCase__ : Tuple = input_a[input_name]
# retrieve expected_length as multiple of pad_to_multiple_of
lowerCamelCase__ : List[Any] = len(speech_inputs[0] )
if expected_length % pad_to_multiple_of != 0:
lowerCamelCase__ : Any = ((len(speech_inputs[0] ) // pad_to_multiple_of) + 1) * pad_to_multiple_of
self.assertTrue(len(input_a[0] ) == expected_length )
self.assertTrue(_inputs_have_equal_length(SCREAMING_SNAKE_CASE_ ) )
self.assertFalse(_inputs_have_equal_length(SCREAMING_SNAKE_CASE_ ) )
def A_ ( self : Union[str, Any] ) -> Dict:
self._check_padding(numpify=SCREAMING_SNAKE_CASE_ )
def A_ ( self : Tuple ) -> Tuple:
self._check_padding(numpify=SCREAMING_SNAKE_CASE_ )
def A_ ( self : List[str] ) -> Any:
self._check_truncation(numpify=SCREAMING_SNAKE_CASE_ )
def A_ ( self : Optional[Any] ) -> Dict:
self._check_truncation(numpify=SCREAMING_SNAKE_CASE_ )
@require_torch
    def test_padding_accepts_tensors_pt(self ):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict )
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        input_name = feat_extract.model_input_names[0]
        processed_features = BatchFeature({input_name: speech_inputs} )
        input_np = feat_extract.pad(processed_features , padding='longest' , return_tensors='np' )[input_name]
        input_pt = feat_extract.pad(processed_features , padding='longest' , return_tensors='pt' )[input_name]
        self.assertTrue(abs(input_np.astype(np.float32 ).sum() - input_pt.numpy().astype(np.float32 ).sum() ) < 1e-2 )
@require_tf
    def test_padding_accepts_tensors_tf(self ):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict )
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        input_name = feat_extract.model_input_names[0]
        processed_features = BatchFeature({input_name: speech_inputs} )
        input_np = feat_extract.pad(processed_features , padding='longest' , return_tensors='np' )[input_name]
        input_tf = feat_extract.pad(processed_features , padding='longest' , return_tensors='tf' )[input_name]
        self.assertTrue(abs(input_np.astype(np.float32 ).sum() - input_tf.numpy().astype(np.float32 ).sum() ) < 1e-2 )
    def test_attention_mask(self ):
        feat_dict = self.feat_extract_dict
        feat_dict['return_attention_mask'] = True
        feat_extract = self.feature_extraction_class(**feat_dict )
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        input_lengths = [len(x ) for x in speech_inputs]
        input_name = feat_extract.model_input_names[0]
        processed = BatchFeature({input_name: speech_inputs} )
        processed = feat_extract.pad(processed , padding='longest' , return_tensors='np' )
        self.assertIn('attention_mask' , processed )
        self.assertListEqual(list(processed.attention_mask.shape ) , list(processed[input_name].shape[:2] ) )
        self.assertListEqual(processed.attention_mask.sum(-1 ).tolist() , input_lengths )
    def test_attention_mask_with_truncation(self ):
        feat_dict = self.feat_extract_dict
        feat_dict['return_attention_mask'] = True
        feat_extract = self.feature_extraction_class(**feat_dict )
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        input_lengths = [len(x ) for x in speech_inputs]
        input_name = feat_extract.model_input_names[0]
        processed = BatchFeature({input_name: speech_inputs} )
        max_length = min(input_lengths )
        processed_pad = feat_extract.pad(
            processed , padding='max_length' , max_length=max_length , truncation=True , return_tensors='np' )
        self.assertIn('attention_mask' , processed_pad )
        self.assertListEqual(
            list(processed_pad.attention_mask.shape ) , [processed_pad[input_name].shape[0], max_length] )
        self.assertListEqual(
            processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist() , [max_length for x in speech_inputs] )
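# The two attention-mask tests above rely on one invariant: after padding, each mask
# row sums to that sequence's true length, capped at `max_length` when truncating.
# A minimal stand-alone sketch of that invariant (plain NumPy only; the helper name
# and shapes are illustrative assumptions, not part of the library API):
def _pad_with_mask_sketch(seqs, max_length=None):
    target = max_length if max_length is not None else max(len(s) for s in seqs)
    batch = np.zeros((len(seqs), target), dtype=np.float32)
    mask = np.zeros((len(seqs), target), dtype=np.int32)
    for i, s in enumerate(seqs):
        n = min(len(s), target)  # truncate sequences longer than the target length
        batch[i, :n] = s[:n]
        mask[i, :n] = 1
    return batch, mask
# _pad_with_mask_sketch([[0.1, 0.2, 0.3], [0.4]], max_length=2)[1].sum(-1) -> [2, 1]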
| 50 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
    '''configuration_vision_encoder_decoder''': ['''VisionEncoderDecoderConfig''', '''VisionEncoderDecoderOnnxConfig''']
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_vision_encoder_decoder'''] = ['''VisionEncoderDecoderModel''']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_vision_encoder_decoder'''] = ['''TFVisionEncoderDecoderModel''']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_flax_vision_encoder_decoder'''] = ['''FlaxVisionEncoderDecoderModel''']
if TYPE_CHECKING:
from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
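# For reference, the `_LazyModule` mechanism used above can be approximated with the
# standard library alone: attribute access triggers the real submodule import, so the
# heavy torch/TF/Flax imports are deferred until actually needed. A simplified sketch
# (class and attribute names are illustrative assumptions, not the transformers code):
#
#     import importlib
#     import types
#
#     class LazyModuleSketch(types.ModuleType):
#         def __init__(self, name, import_structure):
#             super().__init__(name)
#             # Map each exported name to the submodule that defines it.
#             self._attr_to_module = {
#                 attr: mod for mod, attrs in import_structure.items() for attr in attrs
#             }
#
#         def __getattr__(self, attr):
#             if attr not in self._attr_to_module:
#                 raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
#             submodule = importlib.import_module("." + self._attr_to_module[attr], self.__name__)
#             return getattr(submodule, attr)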
| 319 | 0 |
import argparse
import os

import jax as jnp
import numpy as onp
import torch
import torch.nn as nn
from music_spectrogram_diffusion import inference
from t5x import checkpoints

from diffusers import DDPMScheduler, OnnxRuntimeModel, SpectrogramDiffusionPipeline
from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, T5FilmDecoder
MODEL = '''base_with_context'''
def load_notes_encoder(weights , model ):
__UpperCamelCase = nn.Parameter(torch.FloatTensor(weights['token_embedder']['embedding'] ) )
__UpperCamelCase = nn.Parameter(
        torch.FloatTensor(weights['Embed_0']['embedding'] ) , requires_grad=False )
for lyr_num, lyr in enumerate(model.encoders ):
        ly_weight = weights[f'''layers_{lyr_num}''']
__UpperCamelCase = nn.Parameter(
torch.FloatTensor(ly_weight['pre_attention_layer_norm']['scale'] ) )
        attention_weights = ly_weight['''attention''']
__UpperCamelCase = nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T ) )
__UpperCamelCase = nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T ) )
__UpperCamelCase = nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T ) )
__UpperCamelCase = nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T ) )
__UpperCamelCase = nn.Parameter(torch.FloatTensor(ly_weight['pre_mlp_layer_norm']['scale'] ) )
__UpperCamelCase = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_0']['kernel'].T ) )
__UpperCamelCase = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_1']['kernel'].T ) )
__UpperCamelCase = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wo']['kernel'].T ) )
__UpperCamelCase = nn.Parameter(torch.FloatTensor(weights['encoder_norm']['scale'] ) )
return model
def load_continuous_encoder(weights , model ):
__UpperCamelCase = nn.Parameter(torch.FloatTensor(weights['input_proj']['kernel'].T ) )
__UpperCamelCase = nn.Parameter(
        torch.FloatTensor(weights['Embed_0']['embedding'] ) , requires_grad=False )
for lyr_num, lyr in enumerate(model.encoders ):
        ly_weight = weights[f'''layers_{lyr_num}''']
        attention_weights = ly_weight['''attention''']
__UpperCamelCase = nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T ) )
__UpperCamelCase = nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T ) )
__UpperCamelCase = nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T ) )
__UpperCamelCase = nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T ) )
__UpperCamelCase = nn.Parameter(
torch.FloatTensor(ly_weight['pre_attention_layer_norm']['scale'] ) )
__UpperCamelCase = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_0']['kernel'].T ) )
__UpperCamelCase = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_1']['kernel'].T ) )
__UpperCamelCase = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wo']['kernel'].T ) )
__UpperCamelCase = nn.Parameter(torch.FloatTensor(ly_weight['pre_mlp_layer_norm']['scale'] ) )
__UpperCamelCase = nn.Parameter(torch.FloatTensor(weights['encoder_norm']['scale'] ) )
return model
def load_decoder(weights , model ):
__UpperCamelCase = nn.Parameter(torch.FloatTensor(weights['time_emb_dense0']['kernel'].T ) )
__UpperCamelCase = nn.Parameter(torch.FloatTensor(weights['time_emb_dense1']['kernel'].T ) )
__UpperCamelCase = nn.Parameter(
        torch.FloatTensor(weights['Embed_0']['embedding'] ) , requires_grad=False )
__UpperCamelCase = nn.Parameter(
torch.FloatTensor(weights['continuous_inputs_projection']['kernel'].T ) )
for lyr_num, lyr in enumerate(model.decoders ):
        ly_weight = weights[f'''layers_{lyr_num}''']
__UpperCamelCase = nn.Parameter(
torch.FloatTensor(ly_weight['pre_self_attention_layer_norm']['scale'] ) )
__UpperCamelCase = nn.Parameter(
torch.FloatTensor(ly_weight['FiLMLayer_0']['DenseGeneral_0']['kernel'].T ) )
        attention_weights = ly_weight['''self_attention''']
__UpperCamelCase = nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T ) )
__UpperCamelCase = nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T ) )
__UpperCamelCase = nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T ) )
__UpperCamelCase = nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T ) )
        attention_weights = ly_weight['''MultiHeadDotProductAttention_0''']
__UpperCamelCase = nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T ) )
__UpperCamelCase = nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T ) )
__UpperCamelCase = nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T ) )
__UpperCamelCase = nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T ) )
__UpperCamelCase = nn.Parameter(
torch.FloatTensor(ly_weight['pre_cross_attention_layer_norm']['scale'] ) )
__UpperCamelCase = nn.Parameter(torch.FloatTensor(ly_weight['pre_mlp_layer_norm']['scale'] ) )
__UpperCamelCase = nn.Parameter(
torch.FloatTensor(ly_weight['FiLMLayer_1']['DenseGeneral_0']['kernel'].T ) )
__UpperCamelCase = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_0']['kernel'].T ) )
__UpperCamelCase = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_1']['kernel'].T ) )
__UpperCamelCase = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wo']['kernel'].T ) )
__UpperCamelCase = nn.Parameter(torch.FloatTensor(weights['decoder_norm']['scale'] ) )
__UpperCamelCase = nn.Parameter(torch.FloatTensor(weights['spec_out_dense']['kernel'].T ) )
return model
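# The three loaders above all walk a nested dict of numpy arrays as returned by
# checkpoints.load_t5x_checkpoint (e.g. weights['layers_0']['attention']['query']['kernel']).
# A small helper like the sketch below can be handy for inspecting that tree while
# porting weights; the helper itself is illustrative and not part of the original script:
def flatten_param_tree(tree , prefix='' ):
    # Flatten {'a': {'b': array}} into {'a/b': array.shape} for quick inspection.
    flat = {}
    for key, value in tree.items():
        path = f'{prefix}/{key}' if prefix else key
        if isinstance(value , dict ):
            flat.update(flatten_param_tree(value , path ) )
        else:
            flat[path] = getattr(value , 'shape' , None )
    return flat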
def main(args ):
    t5_checkpoint = checkpoints.load_t5x_checkpoint(args.checkpoint_path )
    t5_checkpoint = jnp.tree_util.tree_map(onp.array , t5_checkpoint )
    gin_overrides = [
        '''from __gin__ import dynamic_registration''',
        '''from music_spectrogram_diffusion.models.diffusion import diffusion_utils''',
        '''diffusion_utils.ClassifierFreeGuidanceConfig.eval_condition_weight = 2.0''',
        '''diffusion_utils.DiffusionConfig.classifier_free_guidance = @diffusion_utils.ClassifierFreeGuidanceConfig()''',
    ]
    gin_file = os.path.join(args.checkpoint_path , '..' , 'config.gin' )
    gin_config = inference.parse_training_gin_file(gin_file , gin_overrides )
    synth_model = inference.InferenceModel(args.checkpoint_path , gin_config )
    scheduler = DDPMScheduler(beta_schedule='squaredcos_cap_v2' , variance_type='fixed_large' )
    notes_encoder = SpectrogramNotesEncoder(
        max_length=synth_model.sequence_length['inputs'] , vocab_size=synth_model.model.module.config.vocab_size , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj='gated-gelu' , )
    continuous_encoder = SpectrogramContEncoder(
        input_dims=synth_model.audio_codec.n_dims , targets_context_length=synth_model.sequence_length['targets_context'] , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj='gated-gelu' , )
    decoder = T5FilmDecoder(
        input_dims=synth_model.audio_codec.n_dims , targets_length=synth_model.sequence_length['targets_context'] , max_decoder_noise_time=synth_model.model.module.config.max_decoder_noise_time , d_model=synth_model.model.module.config.emb_dim , num_layers=synth_model.model.module.config.num_decoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , dropout_rate=synth_model.model.module.config.dropout_rate , )
    notes_encoder = load_notes_encoder(t5_checkpoint['target']['token_encoder'] , notes_encoder )
    continuous_encoder = load_continuous_encoder(t5_checkpoint['target']['continuous_encoder'] , continuous_encoder )
    decoder = load_decoder(t5_checkpoint['target']['decoder'] , decoder )
    melgan = OnnxRuntimeModel.from_pretrained('kashif/soundstream_mel_decoder' )
    pipe = SpectrogramDiffusionPipeline(
        notes_encoder=notes_encoder , continuous_encoder=continuous_encoder , decoder=decoder , scheduler=scheduler , melgan=melgan , )
if args.save:
pipe.save_pretrained(args.output_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--output_path''', default=None, type=str, required=True, help='''Path to the converted model.''')
parser.add_argument(
'''--save''', default=True, type=bool, required=False, help='''Whether to save the converted model or not.'''
)
parser.add_argument(
'''--checkpoint_path''',
default=f"""{MODEL}/checkpoint_500000""",
type=str,
required=False,
help='''Path to the original jax model checkpoint.''',
)
    args = parser.parse_args()
main(args)
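# Note on all the `.T` transposes above: Flax/T5X stores Dense kernels as
# (in_features, out_features), while torch.nn.Linear.weight expects
# (out_features, in_features). A minimal sketch of that single conversion step
# (array shapes below are illustrative assumptions):
#
#     flax_kernel = onp.random.randn(64, 128).astype(onp.float32)      # (in, out)
#     linear = nn.Linear(64, 128, bias=False)
#     linear.weight = nn.Parameter(torch.FloatTensor(flax_kernel.T))   # (out, in)
#     x = torch.randn(2, 64)
#     assert torch.allclose(linear(x), torch.from_numpy(x.numpy() @ flax_kernel), atol=1e-5)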
| 310 |
'''simple docstring'''
import fire
from utils import calculate_rouge, save_json
def calculate_rouge_path(pred_path , tgt_path , save_path=None , **kwargs ):
    pred_lns = [x.strip() for x in open(pred_path ).readlines()]
    tgt_lns = [x.strip() for x in open(tgt_path ).readlines()][: len(pred_lns )]
    metrics = calculate_rouge(pred_lns , tgt_lns , **kwargs )
    if save_path is not None:
        save_json(metrics , save_path , indent=None )
    return metrics  # these print nicely
if __name__ == "__main__":
fire.Fire(calculate_rouge_path)
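# Typical invocation (file names are illustrative): fire exposes the function's
# signature on the command line, so the two positional arguments map to `pred_path`
# and `tgt_path`, and any extra flags are forwarded to calculate_rouge via **kwargs:
#
#     python rouge_cli.py predictions.txt references.txt --save_path rouge.json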
| 319 | 0 |
import logging
import os
import sys
from dataclasses import dataclass, field
from importlib import import_module
from typing import Dict, List, Optional, Tuple
import numpy as np
from seqeval.metrics import accuracy_score, f1_score, precision_score, recall_score
from torch import nn
from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask
import transformers
from transformers import (
AutoConfig,
AutoModelForTokenClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
    config_name: Optional[str] = field(
        default=None , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
    task_type: Optional[str] = field(
        default="NER" , metadata={"help": "Task type to fine tune in training (e.g. NER, POS, etc)"} )
    tokenizer_name: Optional[str] = field(
        default=None , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
    use_fast: bool = field(default=False , metadata={"help": "Set this flag to use fast tokenization."} )
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
@dataclass
class DataTrainingArguments:
    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task."} )
    labels: Optional[str] = field(
        default=None , metadata={"help": "Path to a file containing all labels. If not specified, CoNLL-2003 labels are used."} , )
    max_seq_length: int = field(
        default=128 , metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        } , )
    overwrite_cache: bool = field(
        default=False , metadata={"help": "Overwrite the cached training and evaluation sets"} )
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f'''Output directory ({training_args.output_dir}) already exists and is not empty. Use'''
''' --overwrite_output_dir to overcome.''' )
    module = import_module('''tasks''' )
    try:
        token_classification_task_clazz = getattr(module , model_args.task_type )
        token_classification_task = token_classification_task_clazz()
    except AttributeError:
        raise ValueError(
            f'''Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. '''
            f'''Available tasks classes are: {TokenClassificationTask.__subclasses__()}''' )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
        '''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fp16 , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
    logger.info('''Training/evaluation parameters %s''' , training_args )
# Set seed
set_seed(training_args.seed )
# Prepare CONLL-2003 task
    labels = token_classification_task.get_labels(data_args.labels )
    label_map = dict(enumerate(labels ) )
    num_labels = len(labels )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=num_labels , id2label=label_map , label2id={label: i for i, label in enumerate(labels )} , cache_dir=model_args.cache_dir , )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast , )
    model = AutoModelForTokenClassification.from_pretrained(
        model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , )
# Get datasets
    train_dataset = (
        TokenClassificationDataset(
            token_classification_task=token_classification_task , data_dir=data_args.data_dir , tokenizer=tokenizer , labels=labels , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        TokenClassificationDataset(
            token_classification_task=token_classification_task , data_dir=data_args.data_dir , tokenizer=tokenizer , labels=labels , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
        if training_args.do_eval
        else None
    )
    def align_predictions(predictions , label_ids ) -> Tuple[List[int], List[int]]:
        preds = np.argmax(predictions , axis=2 )
        batch_size, seq_len = preds.shape
        out_label_list = [[] for _ in range(batch_size )]
        preds_list = [[] for _ in range(batch_size )]
        for i in range(batch_size ):
            for j in range(seq_len ):
                if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index:
                    out_label_list[i].append(label_map[label_ids[i][j]] )
                    preds_list[i].append(label_map[preds[i][j]] )
        return preds_list, out_label_list
    def compute_metrics(p: EvalPrediction ) -> Dict:
        preds_list, out_label_list = align_predictions(p.predictions , p.label_ids )
        return {
            "accuracy_score": accuracy_score(out_label_list , preds_list ),
            "precision": precision_score(out_label_list , preds_list ),
            "recall": recall_score(out_label_list , preds_list ),
            "f1": f1_score(out_label_list , preds_list ),
        }
# Data collator
    data_collator = DataCollatorWithPadding(tokenizer , pad_to_multiple_of=8 ) if training_args.fp16 else None
# Initialize our Trainer
    trainer = Trainer(
        model=model , args=training_args , train_dataset=train_dataset , eval_dataset=eval_dataset , compute_metrics=compute_metrics , data_collator=data_collator , )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_process_zero():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
    results = {}
    if training_args.do_eval:
        logger.info('''*** Evaluate ***''' )
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir , '''eval_results.txt''' )
        if trainer.is_world_process_zero():
            with open(output_eval_file , '''w''' ) as writer:
                logger.info('''***** Eval results *****''' )
                for key, value in result.items():
                    logger.info(''' %s = %s''' , key , value )
                    writer.write('''%s = %s\n''' % (key, value) )
            results.update(result )
# Predict
if training_args.do_predict:
        test_dataset = TokenClassificationDataset(
            token_classification_task=token_classification_task , data_dir=data_args.data_dir , tokenizer=tokenizer , labels=labels , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.test , )
        predictions, label_ids, metrics = trainer.predict(test_dataset )
        preds_list, _ = align_predictions(predictions , label_ids )
        output_test_results_file = os.path.join(training_args.output_dir , '''test_results.txt''' )
        if trainer.is_world_process_zero():
            with open(output_test_results_file , '''w''' ) as writer:
                for key, value in metrics.items():
                    logger.info(''' %s = %s''' , key , value )
                    writer.write('''%s = %s\n''' % (key, value) )
        # Save predictions
        output_test_predictions_file = os.path.join(training_args.output_dir , '''test_predictions.txt''' )
        if trainer.is_world_process_zero():
            with open(output_test_predictions_file , '''w''' ) as writer:
                with open(os.path.join(data_args.data_dir , '''test.txt''' ) , '''r''' ) as f:
                    token_classification_task.write_predictions_to_file(writer , f , preds_list )
return results
def _mp_fn(index ):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
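# A standalone sketch of the alignment rule used by `align_predictions` above:
# positions whose gold id equals CrossEntropyLoss().ignore_index (-100) are the
# sub-word/special-token slots and are dropped from both lists. The toy scores and
# label map below are illustrative assumptions:
#
#     import numpy as np
#     label_map = {0: "O", 1: "B-PER"}
#     predictions = np.array([[[0.9, 0.1], [0.2, 0.8], [0.9, 0.1]]])  # (batch, seq, labels)
#     label_ids = np.array([[0, 1, -100]])
#     preds = np.argmax(predictions, axis=2)      # -> [[0, 1, 0]]
#     keep = label_ids[0] != -100                 # -> [True, True, False]
#     [label_map[p] for p in preds[0][keep]]      # -> ["O", "B-PER"]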
| 333 |
'''simple docstring'''
def bubble_sort(list_data , length = 0 ) -> list:
    length = length or len(list_data )
    swapped = False
    for i in range(length - 1 ):
        if list_data[i] > list_data[i + 1]:
            list_data[i], list_data[i + 1] = list_data[i + 1], list_data[i]
            swapped = True
    return list_data if not swapped else bubble_sort(list_data , length - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
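# Example runs of the recursive variant above. Each pass bubbles the current maximum
# into place and recurses on the remaining prefix only if a swap happened, so the
# best case (already sorted) is a single O(n) pass; the worst case is O(n^2)
# comparisons with O(n) recursion depth:
#
#     >>> bubble_sort([5, 1, 4, 2])
#     [1, 2, 4, 5]
#     >>> bubble_sort([])
#     []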
| 319 | 0 |